code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
snake_case = parse(importlib.metadata.version("""torch"""))
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    """Compare a library's installed version against a requirement string.

    Args:
        library_or_version: Either an installed package name (whose version is
            looked up via ``importlib.metadata``) or an already-parsed ``Version``.
        operation: A comparison-operator key of ``STR_OPERATION_TO_FUNC``
            (e.g. ``">="``, ``"<"``).
        requirement_version: The version string to compare against.

    Returns:
        bool: Result of ``operation`` applied to the two parsed versions.

    Raises:
        ValueError: If ``operation`` is not a key of ``STR_OPERATION_TO_FUNC``.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    # Resolve the operator *string* to its callable before use; the original
    # called the string itself as a function, which is a TypeError.
    operation_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        # A package name was given: look up its installed version and parse it.
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation_func(library_or_version, parse(requirement_version))


def SCREAMING_SNAKE_CASE__(operation: str, version: str) -> bool:
    """Compare the installed torch version (module-level ``snake_case``) with ``version``.

    The original body called ``compare_versions``, which was never defined in
    this file — the first function above now provides it.
    """
    return compare_versions(snake_case, operation, version)
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCamelCase__ :Dict = logging.get_logger(__name__)
UpperCamelCase__ :int = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class A(PretrainedConfig):
    """Model configuration mirroring BART's (`facebook/bart-large`) hyper-parameters.

    De-obfuscation fixes: the base class `lowerCamelCase__` did not exist
    (`PretrainedConfig` is imported above); the three class attributes were all
    named `A` and overwrote each other; every `__init__` parameter shared one
    name (a SyntaxError); and `kwargs` was read without ever being bound.
    """

    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=5_02_65,
        max_position_embeddings=10_24,
        encoder_layers=12,
        encoder_ffn_dim=40_96,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=40_96,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=10_24,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.0_2,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ) -> None:
        """Store the model hyper-parameters and forward shared fields to `PretrainedConfig`."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class A( lowerCamelCase__ ):
    """ONNX export configuration for a BART-style encoder/decoder model.

    NOTE(review): this block is machine-obfuscated — every method is named
    `_UpperCamelCase`, locals are assigned to `_UpperCamelCase` but read back
    under their intended names (`common_inputs`, `batch`, ...), and several
    `def` lines repeat the parameter name `SCREAMING_SNAKE_CASE__`, which is a
    SyntaxError (duplicate argument). The docs below describe the intended
    behaviour; the code needs de-obfuscation before it can run.
    """

    @property
    def _UpperCamelCase( self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis mapping for the exported model's inputs, keyed by task."""
        if self.task in ["default", "seq2seq-lm"]:
            # Encoder inputs are always (batch, encoder_sequence).
            _UpperCamelCase :List[Any] = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                # With cached past key/values the decoder only sees one new token.
                _UpperCamelCase :Optional[Any] = {0: '''batch'''}
                _UpperCamelCase :int = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                _UpperCamelCase :Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
                _UpperCamelCase :Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            _UpperCamelCase :Tuple = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                _UpperCamelCase , _UpperCamelCase :List[str] = self.num_layers
                for i in range(SCREAMING_SNAKE_CASE__ ):
                    _UpperCamelCase :int = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    _UpperCamelCase :List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            # Sequence-classification / QA: both encoder and decoder inputs, no cache.
            _UpperCamelCase :List[Any] = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        # NOTE(review): `common_inputs` is never bound above (assignments went to
        # `_UpperCamelCase`) — needs de-obfuscation.
        return common_inputs

    @property
    def _UpperCamelCase( self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis mapping for the exported model's outputs, keyed by task."""
        if self.task in ["default", "seq2seq-lm"]:
            _UpperCamelCase :Optional[int] = super().outputs
        else:
            # Explicit two-arg super(): presumably mirrors OnnxConfigWithPast.outputs.
            _UpperCamelCase :List[str] = super(SCREAMING_SNAKE_CASE__ , self ).outputs
            if self.use_past:
                _UpperCamelCase , _UpperCamelCase :int = self.num_layers
                for i in range(SCREAMING_SNAKE_CASE__ ):
                    _UpperCamelCase :Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    _UpperCamelCase :Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs

    def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , ) -> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs (and past key/values) for seq2seq export.

        NOTE(review): duplicated parameter names make this signature invalid
        Python; upstream takes (tokenizer, batch_size, seq_length, is_pair, framework).
        """
        _UpperCamelCase :str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        # Generate decoder inputs
        _UpperCamelCase :List[Any] = seq_length if not self.use_past else 1
        _UpperCamelCase :Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        _UpperCamelCase :Union[str, Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        _UpperCamelCase :str = dict(**SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            _UpperCamelCase , _UpperCamelCase :Any = common_inputs['''input_ids'''].shape
            _UpperCamelCase :Dict = common_inputs['''decoder_input_ids'''].shape[1]
            _UpperCamelCase , _UpperCamelCase :Any = self.num_attention_heads
            # Past key/value tensors are (batch, heads, seq, head_dim).
            _UpperCamelCase :Dict = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            _UpperCamelCase :Tuple = decoder_seq_length + 3
            _UpperCamelCase :Optional[int] = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            # Extend the decoder mask to cover the extra past positions.
            _UpperCamelCase :Optional[int] = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] , dim=1 )
            _UpperCamelCase :Union[str, Any] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            _UpperCamelCase , _UpperCamelCase :int = self.num_layers
            _UpperCamelCase :Any = min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            _UpperCamelCase :Optional[int] = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) - min_num_layers
            _UpperCamelCase :int = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(SCREAMING_SNAKE_CASE__ ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(SCREAMING_SNAKE_CASE__ ),
                        torch.zeros(SCREAMING_SNAKE_CASE__ ),
                        torch.zeros(SCREAMING_SNAKE_CASE__ ),
                        torch.zeros(SCREAMING_SNAKE_CASE__ ),
                    ) )
            # TODO: test this.
            _UpperCamelCase :Optional[int] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
                common_inputs["past_key_values"].append((torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) )
        return common_inputs

    def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , ) -> Mapping[str, Any]:
        """Build dummy inputs (and past key/values) for causal-LM export.

        NOTE(review): same duplicated-parameter SyntaxError as above.
        """
        _UpperCamelCase :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            _UpperCamelCase , _UpperCamelCase :List[Any] = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            _UpperCamelCase :str = seqlen + 2
            _UpperCamelCase , _UpperCamelCase :Dict = self.num_layers
            _UpperCamelCase , _UpperCamelCase :Union[str, Any] = self.num_attention_heads
            _UpperCamelCase :List[Any] = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            # Keep the mask dtype consistent with the tokenizer's output.
            _UpperCamelCase :Tuple = common_inputs['''attention_mask'''].dtype
            _UpperCamelCase :List[Any] = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )] , dim=1 )
            _UpperCamelCase :int = [
                (torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) for _ in range(SCREAMING_SNAKE_CASE__ )
            ]
        return common_inputs

    def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , ) -> Mapping[str, Any]:
        """Tokenize a dummy batch for classification/QA export, pinning dynamic axes."""
        _UpperCamelCase :List[str] = compute_effective_axis_dimension(
            SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        _UpperCamelCase :str = tokenizer.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ )
        _UpperCamelCase :str = compute_effective_axis_dimension(
            SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ )
        # Generate dummy inputs according to compute batch and sequence
        _UpperCamelCase :List[Any] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        _UpperCamelCase :Optional[Any] = dict(tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
        return common_inputs

    def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation to the task-specific helper above."""
        if self.task in ["default", "seq2seq-lm"]:
            _UpperCamelCase :str = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , seq_length=SCREAMING_SNAKE_CASE__ , is_pair=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
        elif self.task == "causal-lm":
            _UpperCamelCase :Optional[Any] = self._generate_dummy_inputs_for_causal_lm(
                SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , seq_length=SCREAMING_SNAKE_CASE__ , is_pair=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
        else:
            _UpperCamelCase :Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , seq_length=SCREAMING_SNAKE_CASE__ , is_pair=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
        return common_inputs

    def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict:
        """Flatten past key/values, choosing the seq2seq or causal-LM base behaviour."""
        if self.task in ["default", "seq2seq-lm"]:
            _UpperCamelCase :Union[str, Any] = super()._flatten_past_key_values_(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        else:
            _UpperCamelCase :Tuple = super(SCREAMING_SNAKE_CASE__ , self )._flatten_past_key_values_(
                SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 355 | 0 |
def lowerCamelCase(SCREAMING_SNAKE_CASE):
    """Upper-case every ASCII lowercase letter in the given string.

    Characters outside 'a'..'z' (digits, punctuation, already-uppercase
    letters, non-ASCII) are returned unchanged. ``chr(ord(c) - 32)`` is the
    ASCII lowercase-to-uppercase shift.
    """
    converted = []
    for ch in SCREAMING_SNAKE_CASE:
        converted.append(chr(ord(ch) - 32) if "a" <= ch <= "z" else ch)
    return "".join(converted)


if __name__ == "__main__":
    from doctest import testmod

    testmod()

import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase_(unittest.TestCase):
    """Integration test for the Flax MT5 conditional-generation model.

    De-obfuscation fix: every local was assigned to ``__UpperCamelCase`` while
    the code read the intended names (``model``, ``tokenizer``, ``labels``...),
    so the original raised NameError on first use.
    """

    @slow
    def UpperCamelCase__(self) -> None:
        """Check that google/mt5-small reproduces the reference score on a toy pair."""
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        # Teacher-forced decoder inputs: labels shifted right with the start token.
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        # Sum of per-token log-likelihoods (mean loss times target length, negated).
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class A_(SCREAMING_SNAKE_CASE_ ):
    """Unit tests for `DPMSolverSDEScheduler` (mirrors the diffusers scheduler test).

    NOTE(review): machine-obfuscated — all methods share the name
    `_lowerCAmelCase` (so only the last one survives on the class), and locals
    are assigned to `_lowerCamelCase` but read back under their intended names
    (`config`, `scheduler`, `sample`, ...). Needs de-obfuscation to run.
    """

    # Scheduler class(es) under test and the number of inference steps used below.
    a_ : int = (DPMSolverSDEScheduler,)
    a_ : Any = 10

    def _lowerCAmelCase ( self , **A ):
        """Default scheduler config, with keyword overrides applied via **A."""
        _lowerCamelCase : Optional[Any] = {
            'num_train_timesteps': 1100,
            'beta_start': 0.0_0_0_1,
            'beta_end': 0.0_2,
            'beta_schedule': 'linear',
            'noise_sampler_seed': 0,
        }
        config.update(**A )
        return config

    def _lowerCAmelCase ( self ):
        """Sweep num_train_timesteps values."""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=A )

    def _lowerCAmelCase ( self ):
        """Sweep (beta_start, beta_end) pairs."""
        for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
            self.check_over_configs(beta_start=A , beta_end=A )

    def _lowerCAmelCase ( self ):
        """Sweep beta schedules."""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=A )

    def _lowerCAmelCase ( self ):
        """Sweep prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=A )

    def _lowerCAmelCase ( self ):
        """Full denoising loop; checks per-device sum/mean of the final sample."""
        _lowerCamelCase : List[str] = self.scheduler_classes[0]
        _lowerCamelCase : Optional[Any] = self.get_scheduler_config()
        _lowerCamelCase : List[str] = scheduler_class(**A )
        scheduler.set_timesteps(self.num_inference_steps )
        _lowerCamelCase : List[str] = self.dummy_model()
        _lowerCamelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
        _lowerCamelCase : List[str] = sample.to(A )
        for i, t in enumerate(scheduler.timesteps ):
            _lowerCamelCase : Optional[Any] = scheduler.scale_model_input(A , A )
            _lowerCamelCase : Dict = model(A , A )
            _lowerCamelCase : Optional[int] = scheduler.step(A , A , A )
            _lowerCamelCase : List[Any] = output.prev_sample
        _lowerCamelCase : List[Any] = torch.sum(torch.abs(A ) )
        _lowerCamelCase : Dict = torch.mean(torch.abs(A ) )
        # Reference values differ per backend (mps / cuda / cpu).
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2
            assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2
            assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3
        else:
            assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
            assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3

    def _lowerCAmelCase ( self ):
        """Full loop with prediction_type='v_prediction'."""
        _lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
        _lowerCamelCase : List[Any] = self.get_scheduler_config(prediction_type='v_prediction' )
        _lowerCamelCase : List[str] = scheduler_class(**A )
        scheduler.set_timesteps(self.num_inference_steps )
        _lowerCamelCase : Dict = self.dummy_model()
        _lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        _lowerCamelCase : List[str] = sample.to(A )
        for i, t in enumerate(scheduler.timesteps ):
            _lowerCamelCase : Dict = scheduler.scale_model_input(A , A )
            _lowerCamelCase : Dict = model(A , A )
            _lowerCamelCase : Optional[int] = scheduler.step(A , A , A )
            _lowerCamelCase : Any = output.prev_sample
        _lowerCamelCase : Tuple = torch.sum(torch.abs(A ) )
        _lowerCamelCase : Any = torch.mean(torch.abs(A ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2
            assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2
            assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3
        else:
            assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2
            assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3

    def _lowerCAmelCase ( self ):
        """Full loop with set_timesteps placed on the target device."""
        _lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
        _lowerCamelCase : Any = self.get_scheduler_config()
        _lowerCamelCase : Optional[int] = scheduler_class(**A )
        scheduler.set_timesteps(self.num_inference_steps , device=A )
        _lowerCamelCase : Union[str, Any] = self.dummy_model()
        _lowerCamelCase : str = self.dummy_sample_deter.to(A ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            _lowerCamelCase : List[Any] = scheduler.scale_model_input(A , A )
            _lowerCamelCase : Optional[Any] = model(A , A )
            _lowerCamelCase : List[str] = scheduler.step(A , A , A )
            _lowerCamelCase : Union[str, Any] = output.prev_sample
        _lowerCamelCase : str = torch.sum(torch.abs(A ) )
        _lowerCamelCase : List[str] = torch.mean(torch.abs(A ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2
            assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2
            assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3
        else:
            assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
            assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3

    def _lowerCAmelCase ( self ):
        """Full loop with Karras sigmas enabled."""
        _lowerCamelCase : Any = self.scheduler_classes[0]
        _lowerCamelCase : Optional[Any] = self.get_scheduler_config()
        _lowerCamelCase : Union[str, Any] = scheduler_class(**A , use_karras_sigmas=A )
        scheduler.set_timesteps(self.num_inference_steps , device=A )
        _lowerCamelCase : List[str] = self.dummy_model()
        _lowerCamelCase : Any = self.dummy_sample_deter.to(A ) * scheduler.init_noise_sigma
        _lowerCamelCase : Tuple = sample.to(A )
        for t in scheduler.timesteps:
            _lowerCamelCase : List[Any] = scheduler.scale_model_input(A , A )
            _lowerCamelCase : Any = model(A , A )
            _lowerCamelCase : str = scheduler.step(A , A , A )
            _lowerCamelCase : Any = output.prev_sample
        _lowerCamelCase : str = torch.sum(torch.abs(A ) )
        _lowerCamelCase : Optional[int] = torch.mean(torch.abs(A ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2
            assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2
            assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
        else:
            assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2
            assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
| 437 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A_(SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Fast pipeline tests for `TextToVideoSDPipeline` with tiny dummy components.

    NOTE(review): machine-obfuscated — all methods share the name
    `_lowerCAmelCase` (so only the last one survives on the class), and locals
    are assigned to `_lowerCamelCase` but read back under their intended names
    (`unet`, `scheduler`, `generator`, ...). Needs de-obfuscation to run.
    """

    a_ : Dict = TextToVideoSDPipeline
    a_ : Dict = TEXT_TO_IMAGE_PARAMS
    a_ : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    a_ : str = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )

    def _lowerCAmelCase ( self ):
        """Build tiny UNet/scheduler/VAE/text-encoder components for fast tests."""
        torch.manual_seed(0 )
        _lowerCamelCase : List[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
        _lowerCamelCase : Optional[int] = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=A , set_alpha_to_one=A , )
        torch.manual_seed(0 )
        _lowerCamelCase : Dict = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        _lowerCamelCase : Optional[int] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        _lowerCamelCase : str = CLIPTextModel(A )
        _lowerCamelCase : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        _lowerCamelCase : Any = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components

    def _lowerCAmelCase ( self , A , A=0 ):
        """Deterministic pipeline kwargs for the given device and seed."""
        if str(A ).startswith('mps' ):
            _lowerCamelCase : Tuple = torch.manual_seed(A )
        else:
            _lowerCamelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A )
        _lowerCamelCase : Optional[int] = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs

    def _lowerCAmelCase ( self ):
        """Run 2 inference steps on CPU and compare a frame slice to reference values."""
        _lowerCamelCase : List[Any] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _lowerCamelCase : Tuple = self.get_dummy_components()
        _lowerCamelCase : Dict = TextToVideoSDPipeline(**A )
        _lowerCamelCase : Optional[int] = sd_pipe.to(A )
        sd_pipe.set_progress_bar_config(disable=A )
        _lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(A )
        _lowerCamelCase : Union[str, Any] = 'np'
        _lowerCamelCase : Optional[int] = sd_pipe(**A ).frames
        _lowerCamelCase : Dict = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        _lowerCamelCase : Tuple = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def _lowerCAmelCase ( self ):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A , expected_max_diff=3E-3 )

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def _lowerCAmelCase ( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A , expected_max_diff=1E-2 )

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def _lowerCAmelCase ( self ):
        pass

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def _lowerCAmelCase ( self ):
        pass

    @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
    def _lowerCAmelCase ( self ):
        pass

    def _lowerCAmelCase ( self ):
        return super().test_progress_bar()
@slow
@skip_mps
class A_(unittest.TestCase ):
    """Slow integration tests for `TextToVideoSDPipeline` against reference videos.

    NOTE(review): machine-obfuscated — both methods share the name
    `_lowerCAmelCase`, and locals assigned to `_lowerCamelCase` are read back
    under their intended names (`pipe`, `prompt`, `video_frames`, ...).
    Needs de-obfuscation to run; also requires CUDA and network access.
    """

    def _lowerCAmelCase ( self ):
        """25-step generation compared against a stored reference video."""
        _lowerCamelCase : Tuple = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
        _lowerCamelCase : Dict = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
        _lowerCamelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        _lowerCamelCase : Tuple = pipe.to('cuda' )
        _lowerCamelCase : str = 'Spiderman is surfing'
        _lowerCamelCase : Any = torch.Generator(device='cpu' ).manual_seed(0 )
        _lowerCamelCase : Union[str, Any] = pipe(A , generator=A , num_inference_steps=25 , output_type='pt' ).frames
        _lowerCamelCase : Any = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5E-2

    def _lowerCAmelCase ( self ):
        """2-step generation compared against a stored reference video."""
        _lowerCamelCase : Optional[Any] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
        _lowerCamelCase : int = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
        _lowerCamelCase : Optional[Any] = pipe.to('cuda' )
        _lowerCamelCase : Tuple = 'Spiderman is surfing'
        _lowerCamelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
        _lowerCamelCase : Tuple = pipe(A , generator=A , num_inference_steps=2 , output_type='pt' ).frames
        _lowerCamelCase : Optional[int] = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5E-2
| 437 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def UpperCAmelCase__(A__=None) -> argparse.ArgumentParser:
    """Build the argument parser for the `accelerate test` command.

    Args:
        A__: Optional argparse sub-parsers object. When given, `test` is
            registered as a sub-command; otherwise a standalone parser is built.

    Returns:
        argparse.ArgumentParser: The configured parser.
    """
    if A__ is not None:
        parser = A__.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        # The original passed `default=A__` (the sub-parsers object) — the help
        # text below documents that the real default is "unset" (None).
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if A__ is not None:
        # NOTE(review): upstream sets `func` to the test-command callable; in this
        # obfuscated file that sibling is also named `UpperCAmelCase__`, so the
        # original (buggy) binding to the sub-parsers object is kept as-is.
        parser.set_defaults(func=A__)
    return parser
def UpperCAmelCase__(A__) -> None:
    """Run the accelerate smoke-test script via `accelerate-launch`.

    Args:
        A__: Parsed CLI namespace; only `A__.config_file` is read.
    """
    # Path of test_utils/scripts/test_script.py relative to this file's package.
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if A__.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={A__.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def UpperCAmelCase__() -> None:
    """CLI entry point: build the test-command parser, parse argv, run the test."""
    # NOTE(review): `test_command_parser` / `test_command` / `main` are the
    # de-obfuscated names of sibling definitions in this file (currently all
    # named `UpperCAmelCase__`); they are unresolved as written, exactly as in
    # the original. The local-variable breakage (assignments to
    # `lowerCamelCase__` read back as `parser`/`args`) is fixed here.
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
| 274 |
"""simple docstring"""
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, n * (2n - 1) for n = 0 .. length - 1.

    The name and parameter are restored from the `__main__` guard below, which
    calls `hexagonal_numbers(length=...)`. The original validity check was
    `isinstance(A__, A__)` — calling isinstance with a non-type second argument
    raises TypeError on every call.

    Raises:
        ValueError: If `length` is not a positive integer.
    """
    # Check the type first: comparing a non-number with `<= 0` would raise TypeError.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def __snake_case(UpperCAmelCase_: float):
    """Format a duration in seconds as `h:mm:ss`, or `mm:ss` when under an hour.

    The original assigned every local to `lowerCamelCase_` while reading
    `t`/`h`/`m`/`s`, and annotated the parameter with `List[Any]` (a name not
    imported in this file).
    """
    t = int(UpperCAmelCase_)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def __snake_case(value, total, prefix, label, width=300):
    """Return the HTML for a labelled `<progress>` bar.

    The original declared five parameters all named `UpperCAmelCase_`
    (a SyntaxError); the names here are taken from the variables the f-string
    body actually interpolates.
    """
    # docstyle-ignore
    return F'''
    <div>
      {prefix}
      <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
      {label}
    </div>
    '''
def __snake_case(UpperCAmelCase_):
    """Render rows (first row = header cells) as an HTML table string.

    The original annotated the parameter as `int` although it is indexed and
    sliced like a sequence of rows, and broke the float-formatting line by
    writing `isinstance(UpperCAmelCase_, UpperCAmelCase_)`.
    """
    items = UpperCAmelCase_
    html_code = "<table border=\"1\" class=\"dataframe\">\n"
    html_code += """ <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += F''' <th>{i}</th>\n'''
    html_code += " </tr>\n </thead>\n <tbody>\n"
    for line in items[1:]:
        html_code += " <tr>\n"
        for elt in line:
            # Floats get fixed 6-decimal formatting; everything else is str()'d.
            elt = F'''{elt:.6f}''' if isinstance(elt, float) else str(elt)
            html_code += F''' <td>{elt}</td>\n'''
        html_code += " </tr>\n"
    html_code += " </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    """A progress bar rendered as HTML in a Jupyter notebook.

    NOTE(review): the obfuscated original assigned every value to one throwaway
    local, so no ``self.*`` attribute was ever bound. Every attribute name below is
    recovered from the reads inside this class (``self.total``, ``self.wait_for``,
    ...), and the class name from the call sites in this file.
    """

    # Number of initial `update` calls during which the bar is always re-rendered.
    warmup = 5
    # Target interval, in seconds, between two bar refreshes.
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value, force_update=False, comment=None):
        """Advance the bar to ``value``; refreshes are throttled to ~`update_every`s."""
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            # First call: initialize the timing state and render immediately.
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            # Next refresh after roughly `update_every` seconds' worth of items.
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        """Rebuild the textual label for ``value`` and re-render the bar."""
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            # display_id was garbled in the obfuscated source; True requests an
            # updatable display handle — TODO confirm against upstream.
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        """Blank out the rendered bar (only meaningful for a top-level bar)."""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    """A progress bar plus a metrics table, for tracking a whole training run.

    NOTE(review): base class and attribute names restored from call sites and the
    reads inside this class; the obfuscated original bound neither.
    """

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        # One row per logged step; first row holds the column names.
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        """Append ``values`` (a dict column -> value) as a new table row."""
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        """Attach (and return) a child bar rendered below the main one."""
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    """`TrainerCallback` that renders training/evaluation progress in a notebook.

    NOTE(review): the obfuscated original bound no ``self.*`` attributes and lost
    the callback method names; both are restored from the `TrainerCallback`
    protocol and the attribute reads inside this class.
    """

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            # Drop bookkeeping entries so only real metrics land in the table.
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step, comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", force_update=True
        )
        self.training_tracker = None
| 675 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class snake_case(metaclass=DummyObject):
    """Placeholder that raises an informative error when the `onnx` backend is missing.

    NOTE(review): the obfuscated original used ``metaclass=lowercase`` (undefined)
    and duplicate ``*args``/``**kwargs`` parameter names (a SyntaxError); the
    metaclass is restored from the ``DummyObject`` import above. The original
    class/method names were lost in obfuscation.
    """

    _lowerCamelCase = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    # NOTE(review): shadows the classmethod above (same obfuscated name).
    @classmethod
    def snake_case(cls, *args, **kwargs):  # noqa: F811
        requires_backends(cls, ["onnx"])
| 675 | 1 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
def _A ( snake_case__ : Any , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Dict=None , snake_case__ : List[Any]=None ):
    # NOTE(review): all five parameters share one name (a SyntaxError in Python),
    # and the body reads `module`, `tensor_name`, `device`, `value` and an
    # fp16-statistics argument that are never bound — the obfuscation destroyed
    # the signature. Presumably this was
    # (module, tensor_name, device, value=None, fp16_statistics=None); confirm
    # against the upstream source before fixing.
    # Purpose (from the body): set the parameter/buffer `tensor_name` of `module`
    # on `device`, routing through bitsandbytes Int8/4-bit parameter classes when
    # the existing parameter is quantized.
    # Recurse if needed
    if "." in tensor_name:
        snake_case__ : Dict = tensor_name.split('''.''' )
        for split in splits[:-1]:
            snake_case__ : str = getattr(snake_case__ , snake_case__ )
            if new_module is None:
                raise ValueError(f'''{module} has no attribute {split}.''' )
            snake_case__ : Tuple = new_module
        snake_case__ : int = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
    snake_case__ : Union[str, Any] = tensor_name in module._buffers
    snake_case__ : Optional[Any] = getattr(snake_case__ , snake_case__ )
    # A meta-device parameter has no data, so a concrete `value` is mandatory.
    if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
        raise ValueError(f'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' )
    snake_case__ : Any = False
    snake_case__ : List[str] = False
    if is_buffer or not is_bitsandbytes_available():
        snake_case__ : str = False
        snake_case__ : List[Any] = False
    else:
        # NOTE(review): `bnb.nn.Paramsabit` / `bnb.nn.IntaParams` / `torch.inta`
        # look like obfuscated `Params4bit` / `Int8Params` / `torch.int8`
        # (the hasattr string below still says "Params4bit") — verify.
        snake_case__ : Tuple = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
        snake_case__ : Dict = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
    if is_abit or is_abit:
        snake_case__ : List[Any] = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                snake_case__ : int = old_value.to(snake_case__ )
            elif isinstance(snake_case__ , torch.Tensor ):
                snake_case__ : List[Any] = value.to('''cpu''' )
                if value.dtype == torch.inta:
                    # int8 checkpoints are only loadable with bitsandbytes > 0.37.2.
                    snake_case__ : str = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
                        '''0.37.2''' )
                    if not is_abit_serializable:
                        raise ValueError(
                            '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
                            '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
            else:
                snake_case__ : Tuple = torch.tensor(snake_case__ , device='''cpu''' )
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None:
                snake_case__ : int = new_value.T
            snake_case__ : Union[str, Any] = old_value.__dict__
            if is_abit:
                snake_case__ : Optional[Any] = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
            elif is_abit:
                snake_case__ : Any = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
            snake_case__ : int = new_value
            if fpaa_statistics is not None:
                setattr(module.weight , '''SCB''' , fpaa_statistics.to(snake_case__ ) )
    else:
        # Non-quantized path: plain tensor move / wrap as nn.Parameter.
        if value is None:
            snake_case__ : Dict = old_value.to(snake_case__ )
        elif isinstance(snake_case__ , torch.Tensor ):
            snake_case__ : Union[str, Any] = value.to(snake_case__ )
        else:
            snake_case__ : Any = torch.tensor(snake_case__ , device=snake_case__ )
        if is_buffer:
            snake_case__ : List[str] = new_value
        else:
            snake_case__ : Tuple = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad )
            snake_case__ : List[str] = new_value
def _A ( snake_case__ : int , snake_case__ : str=None , snake_case__ : int=None , snake_case__ : Union[str, Any]=None , snake_case__ : Any=False ):
    # NOTE(review): duplicate parameter names (SyntaxError) and unbound locals
    # (`model`, `current_key_name`, `modules_to_not_convert`, `has_been_replaced`)
    # — the obfuscation destroyed this signature. From the recursive call below it
    # was presumably (model, modules_to_not_convert=None, current_key_name=None,
    # quantization_config=None, has_been_replaced=False).
    # Purpose (from the body): recursively walk `model`'s children and swap
    # eligible `nn.Linear` modules for bitsandbytes quantized linear layers,
    # returning (model, has_been_replaced).
    for name, module in model.named_children():
        if current_key_name is None:
            snake_case__ : int = []
        current_key_name.append(snake_case__ )
        if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '''.'''.join(snake_case__ ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(snake_case__ , snake_case__ ):
                        snake_case__ ,snake_case__ : Dict = module.weight.shape
                    else:
                        snake_case__ : str = module.in_features
                        snake_case__ : str = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        # NOTE(review): `LinearabitLt` / `Linearabit` below look like
                        # obfuscated `Linear8bitLt` / `Linear4bit` — verify upstream.
                        snake_case__ : Union[str, Any] = bnb.nn.LinearabitLt(
                            snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
                        snake_case__ : Optional[int] = True
                    else:
                        if (
                            quantization_config.llm_inta_skip_modules is not None
                            and name in quantization_config.llm_inta_skip_modules
                        ):
                            pass
                        else:
                            snake_case__ : str = bnb.nn.Linearabit(
                                snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
                            snake_case__ : Tuple = True
                    # Store the module class in case we need to transpose the weight later
                    snake_case__ : Optional[Any] = type(snake_case__ )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(snake_case__ )
        if len(list(module.children() ) ) > 0:
            snake_case__ ,snake_case__ : Any = _replace_with_bnb_linear(
                snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , )
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """Replace eligible `nn.Linear` layers of `model` with bitsandbytes quantized ones.

    `lm_head` is kept in full precision by default for numerical stability.
    NOTE(review): restored from an obfuscated definition whose parameters all
    shared one name (a SyntaxError); the public name comes from the deprecation
    wrappers below, which point callers at `replace_with_bnb_linear`.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def replace_8bit_linear(*args, **kwargs):
    """Deprecated alias of `replace_with_bnb_linear` (kept for backward compatibility).

    NOTE(review): the obfuscated original used the same name for ``*args`` and
    ``**kwargs`` (a SyntaxError) and passed a garbled warning category; the
    function name is taken from its own deprecation message.
    """
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)
def set_module_8bit_tensor_to_device(*args, **kwargs):
    """Deprecated alias of `set_module_quantized_tensor_to_device`.

    NOTE(review): the obfuscated original used the same name for ``*args`` and
    ``**kwargs`` (a SyntaxError) and passed a garbled warning category; the
    function name is taken from its own deprecation message.
    """
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def _A ( snake_case__ : str ):
    # NOTE(review): the obfuscation assigned every value to `snake_case__` while the
    # body reads `tied_model`, `tied_params`, `has_tied_params`, `is_base_model`,
    # `list_modules`, `list_untouched`, `names_to_remove`, `filtered_module_names`
    # — none of which is ever bound. Presumably the parameter was `model`.
    # Purpose (from the body): return the module names (output head + weights tied
    # to it) that should be kept in full precision rather than quantized.
    snake_case__ : Dict = deepcopy(snake_case__ )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    snake_case__ : List[Any] = find_tied_parameters(snake_case__ )
    # For compatibility with Accelerate < 0.18
    if isinstance(snake_case__ , snake_case__ ):
        snake_case__ : Any = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        snake_case__ : Union[str, Any] = sum(snake_case__ , [] )
    snake_case__ : List[str] = len(snake_case__ ) > 0
    # Check if it is a base model
    snake_case__ : Tuple = not hasattr(snake_case__ , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    snake_case__ : List[Any] = list(model.named_children() )
    snake_case__ : Any = [list_modules[-1][0]]
    # add last module together with tied weights
    snake_case__ : str = set(snake_case__ ) - set(snake_case__ )
    snake_case__ : List[str] = list(set(snake_case__ ) ) + list(snake_case__ )
    # remove ".weight" from the keys
    snake_case__ : Tuple = ['''.weight''', '''.bias''']
    snake_case__ : Optional[Any] = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                snake_case__ : List[Any] = name.replace(snake_case__ , '''''' )
        filtered_module_names.append(snake_case__ )
    return filtered_module_names
| 701 |
'''simple docstring'''
def solution(n: int = 4_00_00_00) -> int:
    """Return the sum of the even-valued Fibonacci numbers not exceeding ``n``.

    Project Euler problem 2. NOTE(review): restored from an obfuscated body whose
    locals (``even_fibs``, ``a``, ``b``, ``n``) were never bound; the function name
    comes from the ``__main__`` call site below.
    """
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
    # Script entry point: print the sum of even Fibonacci terms up to the
    # default limit of 4,000,000 (Project Euler #2).
    print(F'''{solution() = }''')
| 694 | 0 |
from ..utils import DummyObject, requires_backends
class UpperCamelCase_(metaclass=DummyObject):
    """Placeholder raising an informative error when the `flax` backend is missing.

    NOTE(review): restored `metaclass=DummyObject` (imported above; the obfuscated
    `UpperCamelCase__` is undefined) and the duplicate `*args`/`**kwargs`
    parameter names that made the original a SyntaxError.
    """

    lowerCamelCase_ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    # NOTE(review): shadows the classmethod above (same obfuscated name).
    @classmethod
    def _snake_case(cls, *args, **kwargs):  # noqa: F811
        requires_backends(cls, ["flax"])
class UpperCamelCase_(metaclass=DummyObject):
    """Placeholder raising an informative error when the `flax` backend is missing.

    NOTE(review): restored `metaclass=DummyObject` and the duplicate
    `*args`/`**kwargs` parameter names (SyntaxError in the obfuscated original).
    """

    lowerCamelCase_ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    # NOTE(review): shadows the classmethod above (same obfuscated name).
    @classmethod
    def _snake_case(cls, *args, **kwargs):  # noqa: F811
        requires_backends(cls, ["flax"])
class UpperCamelCase_(metaclass=DummyObject):
    """Placeholder raising an informative error when the `flax` backend is missing.

    NOTE(review): restored `metaclass=DummyObject` and the duplicate
    `*args`/`**kwargs` parameter names (SyntaxError in the obfuscated original).
    """

    lowerCamelCase_ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    # NOTE(review): shadows the classmethod above (same obfuscated name).
    @classmethod
    def _snake_case(cls, *args, **kwargs):  # noqa: F811
        requires_backends(cls, ["flax"])
class UpperCamelCase_(metaclass=DummyObject):
    """Placeholder raising an informative error when the `flax` backend is missing.

    NOTE(review): restored `metaclass=DummyObject` and the duplicate
    `*args`/`**kwargs` parameter names (SyntaxError in the obfuscated original).
    """

    lowerCamelCase_ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    # NOTE(review): shadows the classmethod above (same obfuscated name).
    @classmethod
    def _snake_case(cls, *args, **kwargs):  # noqa: F811
        requires_backends(cls, ["flax"])
class UpperCamelCase_(metaclass=DummyObject):
    """Placeholder raising an informative error when the `flax` backend is missing.

    NOTE(review): restored `metaclass=DummyObject` and the duplicate
    `*args`/`**kwargs` parameter names (SyntaxError in the obfuscated original).
    """

    lowerCamelCase_ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    # NOTE(review): shadows the classmethod above (same obfuscated name).
    @classmethod
    def _snake_case(cls, *args, **kwargs):  # noqa: F811
        requires_backends(cls, ["flax"])
class UpperCamelCase_(metaclass=DummyObject):
    """Placeholder raising an informative error when the `flax` backend is missing.

    NOTE(review): restored `metaclass=DummyObject` and the duplicate
    `*args`/`**kwargs` parameter names (SyntaxError in the obfuscated original).
    """

    lowerCamelCase_ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    # NOTE(review): shadows the classmethod above (same obfuscated name).
    @classmethod
    def _snake_case(cls, *args, **kwargs):  # noqa: F811
        requires_backends(cls, ["flax"])
class UpperCamelCase_(metaclass=DummyObject):
    """Placeholder raising an informative error when the `flax` backend is missing.

    NOTE(review): restored `metaclass=DummyObject` and the duplicate
    `*args`/`**kwargs` parameter names (SyntaxError in the obfuscated original).
    """

    lowerCamelCase_ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    # NOTE(review): shadows the classmethod above (same obfuscated name).
    @classmethod
    def _snake_case(cls, *args, **kwargs):  # noqa: F811
        requires_backends(cls, ["flax"])
class UpperCamelCase_(metaclass=DummyObject):
    """Placeholder raising an informative error when the `flax` backend is missing.

    NOTE(review): restored `metaclass=DummyObject` and the duplicate
    `*args`/`**kwargs` parameter names (SyntaxError in the obfuscated original).
    """

    lowerCamelCase_ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    # NOTE(review): shadows the classmethod above (same obfuscated name).
    @classmethod
    def _snake_case(cls, *args, **kwargs):  # noqa: F811
        requires_backends(cls, ["flax"])
class UpperCamelCase_(metaclass=DummyObject):
    """Placeholder raising an informative error when the `flax` backend is missing.

    NOTE(review): restored `metaclass=DummyObject` and the duplicate
    `*args`/`**kwargs` parameter names (SyntaxError in the obfuscated original).
    """

    lowerCamelCase_ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    # NOTE(review): shadows the classmethod above (same obfuscated name).
    @classmethod
    def _snake_case(cls, *args, **kwargs):  # noqa: F811
        requires_backends(cls, ["flax"])
class UpperCamelCase_(metaclass=DummyObject):
    """Placeholder raising an informative error when the `flax` backend is missing.

    NOTE(review): restored `metaclass=DummyObject` and the duplicate
    `*args`/`**kwargs` parameter names (SyntaxError in the obfuscated original).
    """

    lowerCamelCase_ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    # NOTE(review): shadows the classmethod above (same obfuscated name).
    @classmethod
    def _snake_case(cls, *args, **kwargs):  # noqa: F811
        requires_backends(cls, ["flax"])
class UpperCamelCase_(metaclass=DummyObject):
    """Placeholder raising an informative error when the `flax` backend is missing.

    NOTE(review): restored `metaclass=DummyObject` and the duplicate
    `*args`/`**kwargs` parameter names (SyntaxError in the obfuscated original).
    """

    lowerCamelCase_ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    # NOTE(review): shadows the classmethod above (same obfuscated name).
    @classmethod
    def _snake_case(cls, *args, **kwargs):  # noqa: F811
        requires_backends(cls, ["flax"])
class UpperCamelCase_(metaclass=DummyObject):
    """Placeholder raising an informative error when the `flax` backend is missing.

    NOTE(review): restored `metaclass=DummyObject` and the duplicate
    `*args`/`**kwargs` parameter names (SyntaxError in the obfuscated original).
    """

    lowerCamelCase_ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    # NOTE(review): shadows the classmethod above (same obfuscated name).
    @classmethod
    def _snake_case(cls, *args, **kwargs):  # noqa: F811
        requires_backends(cls, ["flax"])
class UpperCamelCase_(metaclass=DummyObject):
    """Placeholder raising an informative error when the `flax` backend is missing.

    NOTE(review): restored `metaclass=DummyObject` and the duplicate
    `*args`/`**kwargs` parameter names (SyntaxError in the obfuscated original);
    also stripped dataset-table residue fused onto the last line.
    """

    lowerCamelCase_ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def _snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    # NOTE(review): shadows the classmethod above (same obfuscated name).
    @classmethod
    def _snake_case(cls, *args, **kwargs):  # noqa: F811
        requires_backends(cls, ["flax"])
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=1_6 , lowerCamelCase_=3_6 , lowerCamelCase_=6 , lowerCamelCase_=6 , lowerCamelCase_=6 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ) -> Dict:
_a : Dict = parent
_a : Tuple = batch_size
_a : Optional[int] = seq_length
_a : List[str] = is_training
_a : Tuple = use_input_mask
_a : Dict = use_token_type_ids
_a : List[str] = use_labels
_a : Optional[int] = vocab_size
_a : int = embedding_size
_a : Tuple = hidden_size
_a : str = num_hidden_layers
_a : List[str] = num_hidden_groups
_a : Union[str, Any] = num_attention_heads
_a : Dict = intermediate_size
_a : List[str] = hidden_act
_a : Tuple = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : Tuple = max_position_embeddings
_a : Dict = type_vocab_size
_a : Tuple = type_sequence_label_size
_a : Dict = initializer_range
_a : str = num_labels
_a : Optional[Any] = num_choices
_a : Optional[int] = scope
def __UpperCamelCase ( self ) -> Optional[int]:
_a : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : List[str] = None
if self.use_input_mask:
_a : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_a : Dict = None
if self.use_token_type_ids:
_a : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : List[str] = None
_a : Optional[int] = None
_a : List[Any] = None
if self.use_labels:
_a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : str = ids_tensor([self.batch_size] , self.num_choices )
_a : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self ) -> Any:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_a : str = AlbertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_a : Tuple = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
_a : Dict = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
_a : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_a : Optional[int] = AlbertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_a : Optional[Any] = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , sentence_order_label=lowerCamelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
_a : Union[str, Any] = AlbertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_a : int = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
_a : Union[str, Any] = AlbertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_a : Tuple = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_a : str = self.num_labels
_a : str = AlbertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_a : str = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_a : Optional[int] = self.num_labels
_a : Any = AlbertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_a : List[Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
_a : Optional[int] = self.num_choices
_a : Any = AlbertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_a : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : Optional[Any] = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common(self):
    """Return ``(config, inputs_dict)`` as expected by the common test mixin.

    Bug fixed: the original unpacked all seven tuple elements into a single
    variable, so `input_ids`, `token_type_ids` and `input_mask` referenced
    below were never bound (NameError at runtime).
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
    return config, inputs_dict
@require_torch
class a ( snake_case__ , snake_case__ , unittest.TestCase ):
    """ALBERT model test suite: shape checks for every task head.

    All methods were previously named ``__UpperCamelCase``; they shadowed one
    another and, lacking the ``test`` prefix, were never discovered by
    unittest. Names are restored from the ``model_tester`` calls they make.
    """

    # NOTE(review): the mixin bases `snake_case__` are not defined in the
    # visible portion of this file -- confirm against the real imports.

    # Model classes exercised by the common mixin machinery.
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Mapping used by the pipeline test mixin.
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy label tensors for pretraining-style models."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # NOTE(review): restored from mangled code; upstream gates on
            # MODEL_FOR_PRETRAINING_MAPPING and moves tensors to torch_device
            # -- confirm both names are imported in the full file.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict['sentence_order_label'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        # NOTE(review): config_class restored as AlbertConfig from a mangled
        # reference -- confirm the import.
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # Re-run the base model check under each position-embedding scheme.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    """Slow integration test checking real ``albert-base-v2`` activations.

    Renamed from ``a``, which shadowed the main test suite class of the same
    name (so the suite was never discovered). Locals restored from mangled
    single-name bindings that left ``input_ids``/``attention_mask`` undefined.
    """

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained('albert-base-v2')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        # Compare a small interior slice against reference activations.
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 120 | 0 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
A: int = "path-to-your-trained-model"
A: int = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
A: str = "A photo of sks dog in a bucket"
A: List[str] = pipe(prompt, num_inference_steps=5_0, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 359 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( TokenizerTesterMixin , unittest.TestCase ):
    """XLM tokenizer tests over a tiny BPE vocab/merges fixture.

    All four methods previously shared one mangled name (shadowing each
    other), and several locals were referenced under names that were never
    bound. The base class is restored to ``TokenizerTesterMixin``, which this
    file imports.
    """

    # Hooks read by TokenizerTesterMixin.
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        """Mixin hook: a (raw, expected-detokenized) text pair."""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        # Ids follow the fixture vocab order: "low" -> 14, "er</w>" -> 15, "<unk>" -> 20.
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 359 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
A_ : Any = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from.

    Restored: every field was previously named ``a__`` (so the dataclass
    collapsed to a single field) and defaults referenced an undefined name.
    Field names are grounded in the ``model_args.*`` accesses in ``main``;
    the class name matches the ``HfArgumentParser`` call there.
    """

    # Required: path or hub id of the base model.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
a__ = field(default=_a , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to the data used for training and evaluation.

    Restored: every field was previously named ``a__`` (so the dataclass
    collapsed to a single field) and some defaults referenced an undefined
    name. Field names are grounded in the ``data_args.*`` accesses in ``main``;
    the class name matches the ``HfArgumentParser`` call there.
    """

    # Required: directory with the dataset files for the chosen task.
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    # NOTE(review): default True restored by convention -- confirm intended value.
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log metrics and persist them as ``{split}_results.json``.

    Bug fixed: the original declared three parameters with the same name,
    which is a SyntaxError. The function name is restored to match the
    ``handle_metrics(...)`` calls in ``main``.

    Args:
        split: one of 'train', 'val' or 'test'.
        metrics: metrics dict returned by the trainer.
        output_dir: directory where the json file is written.
    """
    logger.info(f'***** {split} metrics *****')
    for key in sorted(metrics.keys()):
        logger.info(f' {key} = {metrics[key]}')
    save_json(metrics, os.path.join(output_dir, f'{split}_results.json'))
def main():
    """Fine-tune and/or evaluate a seq2seq model from CLI (or JSON) arguments.

    Restored: every local was previously bound to one mangled name while the
    body referenced the real names (``training_args``, ``model_args``, ...),
    so the function raised NameError immediately. The function name matches
    the ``main()`` call in the ``__main__`` guard.

    Returns:
        dict with the metrics of every stage that ran (train/val/test).
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s',
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,  # NOTE(review): restored from mangled `fpaa` -- confirm attribute name
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    # Forward selected regularisation knobs from the training args onto the model config.
    extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf='.ckpt' in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = SeqaSeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path='train',
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or '',
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path='val',
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or '',
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path='test',
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or '',
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=SeqaSeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )
    all_metrics = {}

    # Training
    if training_args.do_train:
        logger.info('*** Train ***')
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics['train_n_objs'] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('train', metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, 'trainer_state.json'))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate(metric_key_prefix='val')
        metrics['val_n_objs'] = data_args.n_val
        metrics['val_loss'] = round(metrics['val_loss'], 4)
        if trainer.is_world_process_zero():
            handle_metrics('val', metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info('*** Predict ***')
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix='test')
        metrics = test_output.metrics
        metrics['test_n_objs'] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics['test_loss'] = round(metrics['test_loss'], 4)
            handle_metrics('test', metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, 'test_generations.txt'))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, 'all_results.json'))

    return all_metrics
def _mp_fn(index):
    """Entry point for ``xla_spawn`` (TPUs); the process index is unused."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 303 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# Gate the real pipeline imports on optional heavy dependencies: the
# Versatile Diffusion pipelines require both `torch` and `transformers` >= 4.25.0.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to placeholder objects that raise an informative error on use.
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    # Dependencies satisfied: expose the real implementations.
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 303 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module-level logger for this configuration module.
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)

# Map from model identifier to the URL of its hosted config file.
# NOTE(review): this constant reuses the same name as the logger above, so the
# dict clobbers the logger binding -- looks like mangled names; verify.
SCREAMING_SNAKE_CASE__ : List[Any] = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class snake_case ( PretrainedConfig ):
    """Configuration class for a Trajectory Transformer model.

    Restored: the ``__init__`` previously declared every parameter under one
    name (a SyntaxError), class attributes all shared one name, instance
    attributes were assigned to a module-level name instead of ``self``, and
    the base class referenced an undefined name. ``PretrainedConfig`` is the
    only config base imported by this module; parameter names are grounded in
    the names the body already referenced.
    """

    model_type = 'trajectory_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 636 | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    """Return this process's slice of the global tensor ``[1..num_processes**2]``.

    For process ``i`` of ``P`` processes this yields the floats
    ``[P*i + 1, ..., P*i + P]`` on ``state.device``. The parameter name is
    restored: the body read ``state`` while the signature declared another
    name; the function name matches the ``create_tensor`` calls below.
    """
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def test_gather(state):
    """Gather every process's tensor and check the concatenated values.

    Restored names: the original gathered the *state* object instead of the
    tensor and asserted on an unbound local.
    """
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
def test_gather_object(state):
    """Gather a plain Python object from every process; check length and order."""
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f'{gathered_obj}, {len(gathered_obj)} != {state.num_processes}'
    assert gathered_obj == list(range(state.num_processes)), f'{gathered_obj} != {list(range(state.num_processes))}'
def test_broadcast(state):
    """Broadcast the main process's tensor and check shape and values."""
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
def test_pad_across_processes(state):
    """Check that shorter per-process tensors are right-padded to the longest.

    The main process contributes one extra element, so every other process's
    tensor must come back padded with a trailing zero.
    """
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
def test_reduce_sum(state):
    """Reduce with 'sum' across exactly two processes and compare to the truth."""
    # The expected values below are hard-coded for a 2-process run.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'sum')
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'{reduced_tensor} != {truth_tensor}'
def test_reduce_mean(state):
    """Reduce with 'mean' across exactly two processes and compare to the truth."""
    # The expected values below are hard-coded for a 2-process run.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'mean')
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'{reduced_tensor} != {truth_tensor}'
def _a ( lowercase__ : int ):
    """Spawn-launcher entry point; runs ``main`` ignoring the process index."""
    # NOTE(review): `main` is not bound under that name in this chunk (the
    # preceding defs are all named `_a`) -- confirm against the full file.
    main()
def main():
    """Exercise each cross-process collective on the current distributed state.

    Restored: ``state`` was assigned under a mangled name while the body read
    ``state``, and each ``test_*`` call passed an unbound name. The function
    name matches the ``main()`` call in the ``__main__`` guard.
    """
    state = PartialState()
    state.print(f'State: {state}')

    state.print('testing gather')
    test_gather(state)
    state.print('testing gather_object')
    test_gather_object(state)
    state.print('testing broadcast')
    test_broadcast(state)
    state.print('testing pad_across_processes')
    test_pad_across_processes(state)
    state.print('testing reduce_sum')
    test_reduce_sum(state)
    state.print('testing reduce_mean')
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
| 636 | 1 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    """Builds tiny MPNet configs/inputs and runs shape checks for each head.

    Restored: the class and all its methods shared mangled names (so methods
    shadowed each other and the test class's ``MPNetModelTester(self)`` call
    was unresolvable). Names are grounded in the calls made by the test class
    below and the internal ``self.get_config()`` call.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        """Full-size config, used by slow/large-model tests."""
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        """Create a tiny config plus random ids/mask/label tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate each sequence along a new choice axis as the model expects.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` as expected by the common test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """MPNet test suite.

    Restored: all test methods shared one mangled name (shadowing each other
    and, lacking the ``test`` prefix, never discovered by unittest), the base
    mixins referenced undefined names (this file imports ``ModelTesterMixin``
    and ``PipelineTesterMixin``), and the class name collided with two sibling
    classes.
    """

    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): attribute names restored by mixin convention -- confirm.
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class UpperCamelCase(unittest.TestCase):
    """Integration test: run the pretrained ``microsoft/mpnet-base`` checkpoint on a
    fixed input and compare a slice of the hidden states against reference values.

    NOTE(review): locals in the original were all clobbered into ``A__`` and the
    forward pass referenced an undefined name; variable names below were
    reconstructed from the surrounding references.
    """

    @slow
    def __A(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 491 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
# Make all randomized ops deterministic so the numeric slice comparisons below are reproducible.
enable_full_determinism()
class UpperCamelCase(_UpperCAmelCase, unittest.TestCase):
    """Pipeline-mixin tests for ``KandinskyVaaPriorPipeline`` built from tiny dummy
    components (CLIP encoders, a 1-layer PriorTransformer and an UnCLIP scheduler).

    NOTE(review): every method below is named ``__A`` (later definitions shadow
    earlier ones), locals were mangled to ``A__`` and several call arguments
    reference the undefined name ``UpperCAmelCase__``. Code is preserved
    byte-for-byte; only comments are added.
    """

    lowerCAmelCase : Union[str, Any] = KandinskyVaaPriorPipeline
    # Required / batch / optional call-argument names consumed by the tester mixin.
    # NOTE(review): the attribute name ``lowerCAmelCase`` is reused, so each
    # assignment overwrites the previous one.
    lowerCAmelCase : Dict = ["""prompt"""]
    lowerCAmelCase : List[Any] = ["""prompt""", """negative_prompt"""]
    lowerCAmelCase : Optional[int] = [
        """num_images_per_prompt""",
        """generator""",
        """num_inference_steps""",
        """latents""",
        """negative_prompt""",
        """guidance_scale""",
        """output_type""",
        """return_dict""",
    ]
    lowerCAmelCase : Dict = False

    @property
    def __A(self):
        # presumably text_embedder_hidden_size — TODO confirm
        return 32

    @property
    def __A(self):
        # presumably time_input_dim — TODO confirm
        return 32

    @property
    def __A(self):
        return self.time_input_dim

    @property
    def __A(self):
        return self.time_input_dim * 4

    @property
    def __A(self):
        return 100

    @property
    def __A(self):
        # Tiny random CLIP tokenizer keeps the test fast.
        A__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def __A(self):
        # Seed for reproducible random weights.
        torch.manual_seed(0)
        A__ = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        return CLIPTextModelWithProjection(UpperCAmelCase__)

    @property
    def __A(self):
        torch.manual_seed(0)
        A__ = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        A__ = PriorTransformer(**UpperCAmelCase__)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        A__ = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def __A(self):
        torch.manual_seed(0)
        A__ = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
        A__ = CLIPVisionModelWithProjection(UpperCAmelCase__)
        return model

    @property
    def __A(self):
        # Standard CLIP preprocessing constants.
        A__ = CLIPImageProcessor(
            crop_size=224, do_center_crop=UpperCAmelCase__, do_normalize=UpperCAmelCase__, do_resize=UpperCAmelCase__, image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073], image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711], resample=3, size=224, )
        return image_processor

    def __A(self):
        # Assemble all pipeline components into the kwargs dict the pipeline expects.
        A__ = self.dummy_prior
        A__ = self.dummy_image_encoder
        A__ = self.dummy_text_encoder
        A__ = self.dummy_tokenizer
        A__ = self.dummy_image_processor
        A__ = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1_000, clip_sample=UpperCAmelCase__, clip_sample_range=10.0, )
        A__ = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def __A(self, UpperCAmelCase__, UpperCAmelCase__=0):
        # Deterministic generator plus the standard call kwargs for the pipeline.
        # NOTE(review): duplicate parameter names here are a SyntaxError in the original.
        if str(UpperCAmelCase__).startswith("mps"):
            # torch.Generator(device=...) is unsupported on MPS — use the global seed.
            A__ = torch.manual_seed(UpperCAmelCase__)
        else:
            A__ = torch.Generator(device=UpperCAmelCase__).manual_seed(UpperCAmelCase__)
        A__ = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def __A(self):
        # Smoke test: run the pipeline with dict and tuple returns and compare slices.
        A__ = "cpu"
        A__ = self.get_dummy_components()
        A__ = self.pipeline_class(**UpperCAmelCase__)
        A__ = pipe.to(UpperCAmelCase__)
        pipe.set_progress_bar_config(disable=UpperCAmelCase__)
        A__ = pipe(**self.get_dummy_inputs(UpperCAmelCase__))
        A__ = output.image_embeds
        A__ = pipe(
            **self.get_dummy_inputs(UpperCAmelCase__), return_dict=UpperCAmelCase__, )[0]
        A__ = image[0, -10:]
        A__ = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        A__ = np.array(
            [-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def __A(self):
        # Batched-vs-single-prompt equivalence check (relaxed tolerances on CPU).
        A__ = torch_device == "cpu"
        A__ = True
        A__ = False
        self._test_inference_batch_single_identical(
            test_max_difference=UpperCAmelCase__, relax_max_difference=UpperCAmelCase__, test_mean_pixel_difference=UpperCAmelCase__, )

    @skip_mps
    def __A(self):
        # Attention-slicing forward pass must match the unsliced forward pass.
        A__ = torch_device == "cpu"
        A__ = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=UpperCAmelCase__, test_mean_pixel_difference=UpperCAmelCase__, )
| 491 | 1 |
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class lowerCamelCase_(unittest.TestCase):
    """Bare-bones tester used by the MarkupLM feature-extraction test suite."""

    def __init__(self, lowerCAmelCase__):
        # NOTE(review): as in the original, the parent test case is received but
        # bound only to a throwaway local — it is never stored on the instance.
        received_parent = lowerCAmelCase__

    def __lowercase(self):
        """Return the kwargs dict for building the feature extractor (none needed)."""
        return {}
def UpperCAmelCase():
    """Return the two raw HTML fixture documents for the MarkupLM feature-extractor tests.

    NOTE(review): the original bound both documents to the same mangled local
    (the second assignment clobbered the first) and then returned an undefined
    name; the obvious intent — return both documents — is restored here.
    """
    html_string_a = '''<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'''
    html_string_a = '''\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '''
    return [html_string_a, html_string_a]
@require_bsa
class lowerCamelCase_(__a, unittest.TestCase):
    """Tests for ``MarkupLMFeatureExtractor``: extracting (nodes, xpaths) from raw HTML.

    NOTE(review): method names were mangled to ``__lowercase`` (later definitions
    shadow earlier ones), locals to ``SCREAMING_SNAKE_CASE`` (each assignment
    clobbers the previous), and several call arguments reference ``lowerCAmelCase_``,
    which is not the intended HTML-string variable. Code is preserved byte-for-byte;
    only comments are added.
    """

    # Feature-extractor class under test (None when beautifulsoup is unavailable).
    _lowerCAmelCase : Optional[int] = MarkupLMFeatureExtractor if is_bsa_available() else None

    def __lowercase(self):
        # setUp: build the minimal tester defined above.
        SCREAMING_SNAKE_CASE : List[Any] = MarkupLMFeatureExtractionTester(self)

    @property
    def __lowercase(self):
        # The kwargs used to construct the feature extractor.
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def __lowercase(self):
        SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class()
        # Test not batched input
        SCREAMING_SNAKE_CASE : Any = get_html_strings()[0]
        SCREAMING_SNAKE_CASE : List[str] = feature_extractor(lowerCAmelCase_)
        # fmt: off
        SCREAMING_SNAKE_CASE : Tuple = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
        SCREAMING_SNAKE_CASE : str = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
        # fmt: on
        self.assertEqual(encoding.nodes, lowerCAmelCase_)
        self.assertEqual(encoding.xpaths, lowerCAmelCase_)
        # Test batched
        SCREAMING_SNAKE_CASE : Union[str, Any] = get_html_strings()
        SCREAMING_SNAKE_CASE : int = feature_extractor(lowerCAmelCase_)
        # fmt: off
        SCREAMING_SNAKE_CASE : List[str] = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
        SCREAMING_SNAKE_CASE : List[Any] = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
        self.assertEqual(len(encoding.nodes ) , 2 )
        self.assertEqual(len(encoding.xpaths ) , 2 )
        self.assertEqual(encoding.nodes, lowerCAmelCase_)
        self.assertEqual(encoding.xpaths, lowerCAmelCase_)
| 719 |
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_:
    """Synthetic-config test harness for the BioGPT model family.

    NOTE(review): argument names were mangled to the single repeated identifier
    ``lowerCAmelCase__`` (duplicate parameter names are a SyntaxError), locals to
    ``SCREAMING_SNAKE_CASE``, and ``prepare_config_and_inputs_for_common`` uses an
    annotated tuple target, which is also invalid Python. Code is preserved
    byte-for-byte; only comments/docstrings are changed.
    """

    def __init__( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : str=13 , lowerCAmelCase__ : List[Any]=7 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Dict=99 , lowerCAmelCase__ : List[str]=32 , lowerCAmelCase__ : str=5 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : Any=37 , lowerCAmelCase__ : Tuple="gelu" , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : str=5_12 , lowerCAmelCase__ : List[str]=16 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : Union[str, Any]=0.02 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Tuple=4 , lowerCAmelCase__ : List[Any]=None , ):
        """Store the synthetic model/config hyper-parameters.

        The intended parameter order (matching the assignments below) is:
        parent, batch_size, seq_length, is_training, use_input_mask,
        use_token_type_ids, use_labels, vocab_size, hidden_size,
        num_hidden_layers, num_attention_heads, intermediate_size, hidden_act,
        hidden_dropout_prob, attention_probs_dropout_prob,
        max_position_embeddings, type_vocab_size, type_sequence_label_size,
        initializer_range, num_labels, num_choices, scope.
        """
        SCREAMING_SNAKE_CASE : str = parent
        SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
        SCREAMING_SNAKE_CASE : Any = seq_length
        SCREAMING_SNAKE_CASE : Optional[int] = is_training
        SCREAMING_SNAKE_CASE : Tuple = use_input_mask
        SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
        SCREAMING_SNAKE_CASE : Tuple = use_labels
        SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
        SCREAMING_SNAKE_CASE : int = hidden_size
        SCREAMING_SNAKE_CASE : int = num_hidden_layers
        SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
        SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
        SCREAMING_SNAKE_CASE : List[str] = hidden_act
        SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
        SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
        SCREAMING_SNAKE_CASE : str = type_vocab_size
        SCREAMING_SNAKE_CASE : int = type_sequence_label_size
        SCREAMING_SNAKE_CASE : Tuple = initializer_range
        SCREAMING_SNAKE_CASE : Tuple = num_labels
        SCREAMING_SNAKE_CASE : List[Any] = num_choices
        SCREAMING_SNAKE_CASE : Any = scope

    def __lowercase ( self : int ):
        """Build random ids/masks/labels tensors plus a config for one test case."""
        SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE : Dict = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE : str = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE : Tuple = None
        SCREAMING_SNAKE_CASE : Union[str, Any] = None
        SCREAMING_SNAKE_CASE : List[str] = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices )
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __lowercase ( self : List[Any] ):
        """Return a small decoder BioGptConfig built from the stored hyper-parameters."""
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )

    def __lowercase ( self : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] ):
        """Base-model forward pass: last_hidden_state shape check."""
        SCREAMING_SNAKE_CASE : Optional[Any] = BioGptModel(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __lowercase ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict , ):
        """Causal-LM head forward pass: logits shape check."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptForCausalLM(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __lowercase ( self : str , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : str , *lowerCAmelCase__ : Optional[Any] ):
        """Check cached (past_key_values) decoding matches a full forward pass when
        the attention mask changes one masked position."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptModel(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        # create attention mask
        SCREAMING_SNAKE_CASE : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Optional[Any] = self.seq_length // 2
        SCREAMING_SNAKE_CASE : Any = 0
        # first forward pass
        SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # change a random masked slice from input_ids
        SCREAMING_SNAKE_CASE : List[Any] = ids_tensor((1,) , lowerCAmelCase__ ).item() + 1
        SCREAMING_SNAKE_CASE : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
        SCREAMING_SNAKE_CASE : str = random_other_next_tokens
        # append to next input_ids and attn_mask
        SCREAMING_SNAKE_CASE : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
        SCREAMING_SNAKE_CASE : str = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCAmelCase__ )] , dim=1 , )
        # get two different outputs
        SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )['''last_hidden_state''']
        SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )['''last_hidden_state''']
        # select random slice
        SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
        SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) )

    def __lowercase ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any] , *lowerCAmelCase__ : Any ):
        """Check cached decoding over a 3-token continuation matches the uncached pass."""
        SCREAMING_SNAKE_CASE : str = BioGptModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval()
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase__ )
        # first forward pass
        SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        SCREAMING_SNAKE_CASE : List[Any] = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )['''last_hidden_state''']
        SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ )[
            '''last_hidden_state'''
        ]
        # select random slice
        SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
        SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) )

    def __lowercase ( self : int , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : str , *lowerCAmelCase__ : Any , lowerCAmelCase__ : int=False ):
        """Run forward + backward through the causal-LM head, optionally with
        gradient checkpointing enabled."""
        SCREAMING_SNAKE_CASE : List[Any] = BioGptForCausalLM(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        result.loss.backward()

    def __lowercase ( self : Any , lowerCAmelCase__ : str , *lowerCAmelCase__ : List[Any] ):
        """Check the scaled initializer std on all ``c_proj`` projection weights."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptModel(lowerCAmelCase__ )
        # Expected std after the 1/sqrt(2*n_layers) residual-projection scaling.
        SCREAMING_SNAKE_CASE : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )

    def __lowercase ( self : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , *lowerCAmelCase__ : str ):
        """Token-classification head forward pass: per-token logits shape check."""
        SCREAMING_SNAKE_CASE : int = self.num_labels
        SCREAMING_SNAKE_CASE : Tuple = BioGptForTokenClassification(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __lowercase ( self : Optional[int] ):
        """Split prepare_config_and_inputs() into (config, inputs_dict) for the common tests.

        NOTE(review): the annotated parenthesized tuple target below is invalid
        Python syntax, preserved as found.
        """
        SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE
            ) ,(
                SCREAMING_SNAKE_CASE
            ) ,(
                SCREAMING_SNAKE_CASE
            ) ,(
                SCREAMING_SNAKE_CASE
            ) ,(
                SCREAMING_SNAKE_CASE
            ) ,(
                SCREAMING_SNAKE_CASE
            ) ,(
                SCREAMING_SNAKE_CASE
            ) ,
        ) : Optional[Any] = config_and_inputs
        SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
    """Common-suite, generation and pipeline tests for BioGPT.

    NOTE(review): every test method below is named ``__lowercase`` (later
    definitions shadow earlier ones), locals are mangled to
    ``SCREAMING_SNAKE_CASE``, and two methods use an invalid annotated tuple
    target. Code is preserved byte-for-byte; comments/docstrings only changed.
    """

    # Model classes exercised by the common ModelTesterMixin tests.
    _lowerCAmelCase : Dict = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    # Generative model classes for the GenerationTesterMixin.
    _lowerCAmelCase : Optional[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
    # Pipeline-task name -> model class map for the pipeline mixin.
    _lowerCAmelCase : List[str] = (
        {
            'feature-extraction': BioGptModel,
            'text-classification': BioGptForSequenceClassification,
            'text-generation': BioGptForCausalLM,
            'token-classification': BioGptForTokenClassification,
            'zero-shot': BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _lowerCAmelCase : Dict = False

    def __lowercase ( self : List[Any] ):
        """setUp: build the model tester and the config tester."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptModelTester(self )
        SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )

    def __lowercase ( self : Dict ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()

    def __lowercase ( self : Optional[Any] ):
        """Base-model forward-shape check."""
        SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase__ )

    def __lowercase ( self : Any ):
        """Forward-shape check for each position-embedding type."""
        SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            SCREAMING_SNAKE_CASE : Optional[Any] = type
            self.model_tester.create_and_check_model(*lowerCAmelCase__ )

    def __lowercase ( self : Optional[Any] ):
        """Cached decoding vs full forward with a modified attention mask."""
        SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCAmelCase__ )

    def __lowercase ( self : List[Any] ):
        """Forward + backward with gradient checkpointing."""
        SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*lowerCAmelCase__ , gradient_checkpointing=lowerCAmelCase__ )

    def __lowercase ( self : List[Any] ):
        """Cached decoding over a multi-token continuation."""
        SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCAmelCase__ )

    def __lowercase ( self : int ):
        """Residual-projection weight-initialization std check."""
        SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCAmelCase__ )

    def __lowercase ( self : str ):
        """Token-classification head check."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCAmelCase__ )

    @slow
    def __lowercase ( self : Tuple ):
        """Batched left-padded generation must match unpadded per-sentence generation."""
        SCREAMING_SNAKE_CASE : Dict = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        model.to(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : int = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        SCREAMING_SNAKE_CASE : Union[str, Any] = '''left'''
        # Define PAD Token = EOS Token = 50256
        SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.eos_token
        SCREAMING_SNAKE_CASE : Dict = model.config.eos_token_id
        # use different length sentences to test batching
        SCREAMING_SNAKE_CASE : Any = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]
        SCREAMING_SNAKE_CASE : List[str] = tokenizer(lowerCAmelCase__ , return_tensors='''pt''' , padding=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Dict = inputs['''input_ids'''].to(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(
            input_ids=lowerCAmelCase__ , attention_mask=inputs['''attention_mask'''].to(lowerCAmelCase__ ) , )
        SCREAMING_SNAKE_CASE : int = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Any = model.generate(input_ids=lowerCAmelCase__ )
        # Shorten the second generation by the number of padding tokens used in the batch.
        SCREAMING_SNAKE_CASE : Tuple = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
        SCREAMING_SNAKE_CASE : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(input_ids=lowerCAmelCase__ , max_length=model.config.max_length - num_paddings )
        SCREAMING_SNAKE_CASE : List[Any] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Dict = [
            '''Hello, my dog is a little bit bigger than a little bit.''',
            '''Today, I have a good idea of how to use the information''',
        ]
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        self.assertListEqual(lowerCAmelCase__ , [non_padded_sentence, padded_sentence] )

    @slow
    def __lowercase ( self : Tuple ):
        """Pretrained checkpoints load successfully."""
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE : List[Any] = BioGptModel.from_pretrained(lowerCAmelCase__ )
            self.assertIsNotNone(lowerCAmelCase__ )

    def __lowercase ( self : List[str] ):
        """Single-label sequence-classification head: logits shape check."""
        SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE : Union[str, Any] = 3
        SCREAMING_SNAKE_CASE : Dict = input_dict['''input_ids''']
        SCREAMING_SNAKE_CASE : str = input_ids.ne(1 ).to(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        SCREAMING_SNAKE_CASE : Dict = BioGptForSequenceClassification(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def __lowercase ( self : str ):
        """Multi-label sequence-classification head: logits shape check."""
        SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE : Tuple = 3
        SCREAMING_SNAKE_CASE : Optional[Any] = '''multi_label_classification'''
        SCREAMING_SNAKE_CASE : Any = input_dict['''input_ids''']
        SCREAMING_SNAKE_CASE : Any = input_ids.ne(1 ).to(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : List[str] = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        SCREAMING_SNAKE_CASE : List[Any] = BioGptForSequenceClassification(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowerCamelCase_(unittest.TestCase):
    """Slow integration tests against the pretrained ``microsoft/biogpt`` checkpoint.

    NOTE(review): locals in the original were mangled to a single repeated name and
    several calls referenced undefined identifiers; names were reconstructed from
    the surrounding references. Both methods keep the original name
    ``__lowercase``, so the second definition shadows the first on the class.
    """

    @slow
    def __lowercase(self):
        """Forward pass on a fixed input: check logits shape and a reference slice."""
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 48_05, 9, 6_56, 21]])
        output = model(input_ids)[0]
        vocab_size = 4_23_84
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def __lowercase(self):
        """Beam-search generation from a fixed prompt must match the reference text."""
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=1_00,
            max_length=10_24,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
| 464 | 0 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
# Module-level logger shared by the TensorFlow benchmark helpers below.
__snake_case =logging.get_logger(__name__)
def a_(do_eager_mode, use_xla):
    """Return a decorator that runs a TF benchmark function either eagerly or as a
    (optionally XLA-compiled) ``tf.function``.

    NOTE(review): the original declared both parameters as ``lowerCamelCase``
    (duplicate parameter names are a SyntaxError); the body's references to
    ``do_eager_mode`` and ``use_xla`` fix the intended names.

    Raises:
        ValueError: if eager mode and XLA are requested together.
    """
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.'
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def a_(batch_size, sequence_length, vocab_size):
    """Build a random ``(batch_size, sequence_length)`` int32 tensor of token ids
    in ``[0, vocab_size)`` for benchmarking.

    NOTE(review): the original declared all three parameters as ``lowerCamelCase``
    (duplicate parameter names are a SyntaxError), never bound the ``rng`` it
    used, and referenced the mangled dtype ``tf.intaa`` (i.e. ``tf.int32``).
    """
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class UpperCAmelCase_(Benchmark):
    """TensorFlow implementation of the benchmark harness.

    Measures inference/training speed (via ``timeit``) and peak memory (via nvml
    on GPU, ``measure_peak_memory_cpu`` on CPU) for a named model configuration.

    NOTE(review): the original block was machine-garbled — every method was named
    ``__UpperCAmelCase``, each signature repeated one parameter name (a
    SyntaxError), and every assignment targeted ``lowerCAmelCase`` while later
    lines read ``strategy``, ``config``, ``_inference`` etc. Names below are
    restored from those reads (``self._prepare_inference_func``,
    ``self._measure_speed``, …) and from the imported ``Benchmark`` base class,
    which replaces the undefined base ``__lowercase``.
    """

    # Populated by the Benchmark base class / caller.
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        """Installed TensorFlow version string."""
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Time a single forward pass of `model_name` at the given sizes."""
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.')
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Time a single forward+backward pass of `model_name`."""
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.')
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        """Measure peak memory of a single forward pass."""
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.')
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        """Measure peak memory of a single forward+backward pass."""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.')
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Instantiate the model and return a zero-arg callable running one forward pass."""
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError('Mixed precision is currently not supported.')

        has_model_class_in_config = (
            hasattr(config, 'architectures')
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = 'TF' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('transformers', fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
                    ' set `--only_pretrain_model` or `args.only_pretrain_model=True`.')
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, 'vocab_size') else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            # NOTE(review): the garbled source hid this literal; upstream passes
            # training=False for benchmark forward passes.
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Instantiate the model and return a zero-arg callable computing one step's gradients."""
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.')

        if self.args.fp16:
            raise NotImplementedError('Mixed precision is currently not supported.')

        has_model_class_in_config = (
            hasattr(config, 'architectures')
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = 'TF' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('transformers', fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
                    ' set `--only_pretrain_model` or `args.only_pretrain_model=True`.')
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, 'vocab_size') else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=False)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=False)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        """Return the best per-run time (seconds) over `args.repeat` timeit repeats."""
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info('Do inference on TPU. Running model 5 times to stabilize compilation')
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat,
                # min should be taken rather than the average
                runtimes = timeit.repeat(func, repeat=self.args.repeat, number=10)

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        """Run `func` once and report peak memory plus an optional line-by-line summary."""
        logger.info(
            'Note that TensorFlow allocates more memory than '
            'it might need to speed up computation. '
            'The memory reported here corresponds to the memory '
            'reported by `nvidia-smi`, which can vary depending '
            'on total available memory on the GPU that is used.')
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            '`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
                            ' consumption line by line.')
                    trace = start_memory_tracing('transformers')

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
                        ' with `args.memory=False`')
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            'py3nvml not installed, we won\'t log GPU memory usage. '
                            'Install py3nvml (pip install py3nvml) to log information about GPU.')
                        memory = 'N/A'
                    else:
                        logger.info(
                            'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
                            ' running on the same GPU.')
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
                            ' TensorFlow.')
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 133 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
__snake_case = logging.get_logger(__name__)
# The benchmark class below logs via `logger`; the garbled rename above would
# otherwise leave that name unbound (NameError).
logger = __snake_case
def a_(do_eager_mode: bool, use_xla: bool):
    """Decorator factory: wrap a zero-arg benchmark callable so it runs either
    eagerly or as an (optionally XLA-compiled) ``tf.function``.

    NOTE(review): the original signature declared two parameters both named
    ``lowerCamelCase`` — a SyntaxError. The names restored here, ``do_eager_mode``
    and ``use_xla``, are the ones the body actually reads.

    Raises (when the returned decorator is applied) ValueError if eager mode and
    XLA are requested together.
    """

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.')
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


# The benchmark class in this module applies this decorator under its original
# name; a second `def a_` below would otherwise shadow this function entirely.
run_with_tf_optimizations = a_
def a_(batch_size: int, sequence_length: int, vocab_size: int):
    """Build a random ``(batch_size, sequence_length)`` int32 tensor of token ids
    drawn uniformly from ``[0, vocab_size)``.

    NOTE(review): the original signature repeated one parameter name three times
    (a SyntaxError) and used the nonexistent dtype ``tf.intaa``; names are
    restored from the body's reads and the dtype fixed to ``tf.int32``.
    """
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for _ in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


# Keep the function reachable under the name the benchmark class calls.
random_input_ids = a_
class UpperCAmelCase_(Benchmark):
    """TensorFlow implementation of the benchmark harness.

    Measures inference/training speed (via ``timeit``) and peak memory (via nvml
    on GPU, ``measure_peak_memory_cpu`` on CPU) for a named model configuration.

    NOTE(review): the original block was machine-garbled — every method was named
    ``__UpperCAmelCase``, each signature repeated one parameter name (a
    SyntaxError), and every assignment targeted ``lowerCAmelCase`` while later
    lines read ``strategy``, ``config``, ``_inference`` etc. Names below are
    restored from those reads (``self._prepare_inference_func``,
    ``self._measure_speed``, …) and from the imported ``Benchmark`` base class,
    which replaces the undefined base ``__lowercase``.
    """

    # Populated by the Benchmark base class / caller.
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        """Installed TensorFlow version string."""
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Time a single forward pass of `model_name` at the given sizes."""
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.')
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Time a single forward+backward pass of `model_name`."""
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.')
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        """Measure peak memory of a single forward pass."""
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.')
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        """Measure peak memory of a single forward+backward pass."""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.')
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Instantiate the model and return a zero-arg callable running one forward pass."""
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError('Mixed precision is currently not supported.')

        has_model_class_in_config = (
            hasattr(config, 'architectures')
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = 'TF' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('transformers', fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
                    ' set `--only_pretrain_model` or `args.only_pretrain_model=True`.')
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, 'vocab_size') else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            # NOTE(review): the garbled source hid this literal; upstream passes
            # training=False for benchmark forward passes.
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Instantiate the model and return a zero-arg callable computing one step's gradients."""
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.')

        if self.args.fp16:
            raise NotImplementedError('Mixed precision is currently not supported.')

        has_model_class_in_config = (
            hasattr(config, 'architectures')
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = 'TF' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('transformers', fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
                    ' set `--only_pretrain_model` or `args.only_pretrain_model=True`.')
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, 'vocab_size') else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=False)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=False)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        """Return the best per-run time (seconds) over `args.repeat` timeit repeats."""
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info('Do inference on TPU. Running model 5 times to stabilize compilation')
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat,
                # min should be taken rather than the average
                runtimes = timeit.repeat(func, repeat=self.args.repeat, number=10)

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        """Run `func` once and report peak memory plus an optional line-by-line summary."""
        logger.info(
            'Note that TensorFlow allocates more memory than '
            'it might need to speed up computation. '
            'The memory reported here corresponds to the memory '
            'reported by `nvidia-smi`, which can vary depending '
            'on total available memory on the GPU that is used.')
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            '`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
                            ' consumption line by line.')
                    trace = start_memory_tracing('transformers')

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
                        ' with `args.memory=False`')
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            'py3nvml not installed, we won\'t log GPU memory usage. '
                            'Install py3nvml (pip install py3nvml) to log information about GPU.')
                        memory = 'N/A'
                    else:
                        logger.info(
                            'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
                            ' running on the same GPU.')
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
                            ' TensorFlow.')
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 133 | 1 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
# The class below logs through the conventional name `logger`; bind it before
# the dtype table shadows `SCREAMING_SNAKE_CASE_`.
logger = SCREAMING_SNAKE_CASE_

# Map ONNX tensor element-type strings to numpy dtypes.
# NOTE(review): the original values (`np.inta`, `np.uintaa`, `np.floataa`, ...)
# do not exist in numpy; each value is restored from the dtype named in its key.
SCREAMING_SNAKE_CASE_:Union[str, Any] = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class SCREAMING_SNAKE_CASE__ :
    """Thin wrapper around an `onnxruntime.InferenceSession` with
    save/load-pretrained semantics (local directory or Hugging Face Hub).

    NOTE(review): the original block was machine-garbled — every helper method was
    named `_lowerCAmelCase`, parameter lists repeated one name (a SyntaxError),
    and assignments were discarded into `A` instead of `self.*`. Names below are
    restored from what the bodies read (`self.model`, `self.model_save_dir`,
    `OnnxRuntimeModel.load_model`, `provider`, `file_name`, ...).
    """

    def __init__(self, model=None, **kwargs):
        """Store the ort session plus bookkeeping paths used by `_save_pretrained`."""
        logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""")
        self.model = model
        self.model_save_dir = kwargs.get("""model_save_dir""", None)
        # NOTE(review): the garbled source hid this default; upstream falls back
        # to ONNX_WEIGHTS_NAME — confirm against the original.
        self.latest_model_name = kwargs.get("""latest_model_name""", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        """Convert keyword inputs to numpy and run the session; returns ort outputs."""
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        """Create an `ort.InferenceSession`, defaulting to CPUExecutionProvider."""
        if provider is None:
            logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""")
            provider = """CPUExecutionProvider"""
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        """Copy the current model file (and external weights, if any) into `save_directory`."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        """Public entry point: validate the target directory and delegate."""
        if os.path.isfile(save_directory):
            logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''')
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        """Load an ONNX model either from a local directory or from the Hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = cls.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = cls.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id,
        force_download=True,
        use_auth_token=None,
        cache_dir=None,
        **model_kwargs,
    ):
        """Resolve an optional `repo@revision` spec, then delegate to `_from_pretrained`."""
        revision = None
        if len(str(model_id).split("""@""")) == 2:
            model_id, revision = model_id.split("""@""")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
| 520 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ (BaseImageProcessor):
    """ConvNeXt-style image processor: optional resize (with crop_pct logic for
    sizes below 384), rescale, and normalize, returning a `BatchFeature` with
    `pixel_values`.

    NOTE(review): the original block was machine-garbled — it inherited from
    itself (`SCREAMING_SNAKE_CASE__`), all four helper methods were named
    `_lowerCAmelCase`, and each signature repeated one parameter name (a
    SyntaxError). The base class is restored to the imported
    `BaseImageProcessor`, and method/parameter names from the bodies' own reads
    (`self.resize`, `self.rescale`, `self.normalize`, `crop_pct`, ...).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"""shortest_edge""": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: "PILImageResampling" = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize to `size["shortest_edge"]`; below 384, resize then center-crop using `crop_pct`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''')
        shortest_edge = size["""shortest_edge"""]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: "PILImageResampling" = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> "BatchFeature":
        """Run the configured pipeline over one image or a batch; per-call arguments
        override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")

        # NOTE: parenthesized — the original `a and b or c` form validated the
        # wrong condition when do_resize was False.
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("""crop_pct must be specified if size < 384.""")

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 520 | 1 |
import numpy as np
def a__(lowercase__):
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-lowercase__))


# The second `def a__` below shadows this one at module level, and its body
# called `sigmoid`, which was never defined — a NameError in the original.
# Bind the intended name so the swish below can resolve it.
sigmoid = a__


def a__(lowercase__):
    """Swish / SiLU activation: x * sigmoid(x)."""
    return lowercase__ * sigmoid(lowercase__)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 54 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Map human-readable filter names to Pillow resampling constants. Pillow 9.1
# moved these constants into the `PIL.Image.Resampling` enum and deprecated the
# old module-level names, so the namespace is chosen by installed version.
# NOTE(review): "linear" aliasing BILINEAR looks intentional, but confirm
# against the original source.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    __lowercase : str ={
        """linear""": PIL.Image.Resampling.BILINEAR,
        """bilinear""": PIL.Image.Resampling.BILINEAR,
        """bicubic""": PIL.Image.Resampling.BICUBIC,
        """lanczos""": PIL.Image.Resampling.LANCZOS,
        """nearest""": PIL.Image.Resampling.NEAREST,
    }
else:
    # Pillow < 9.1: legacy module-level constants.
    __lowercase : Any ={
        """linear""": PIL.Image.LINEAR,
        """bilinear""": PIL.Image.BILINEAR,
        """bicubic""": PIL.Image.BICUBIC,
        """lanczos""": PIL.Image.LANCZOS,
        """nearest""": PIL.Image.NEAREST,
    }
def a__(lowercase__):
    """Convert a batch of torch images in [-1, 1] (NCHW) to a list of PIL images.

    NOTE(review): the original body read undefined names (`images`,
    `numpy_to_pil`) — restored here from the evident intent.
    """
    images = (lowercase__ / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    # `numpy_to_pil` is bound below this block; resolved at call time.
    return numpy_to_pil(images)


# The next `def a__` shadows the converter above at module level; keep it
# reachable under its intended name.
pt_to_pil = a__


def a__(lowercase__):
    """Convert an HWC (or NHWC) float numpy image batch in [0, 1] to PIL images."""
    images = lowercase__
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images


numpy_to_pil = a__
| 54 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the TimeSformer model: heavy torch-backed
# modules are only imported when first accessed through the _LazyModule.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fix: this list was previously bound to an unrelated throwaway name and
    # ``_import_structure`` itself was never defined, so the _LazyModule call
    # below raised NameError and the modeling classes were never exposed.
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCamelCase ( snake_case ):
    """Output container for a VQ-VAE encoding step.

    Holds the (not yet quantized) latent representation produced by the
    encoder as a ``torch.FloatTensor``.
    """

    # Encoded latents -- presumably (batch, latent_channels, H, W);
    # NOTE(review): shape not established by this file, confirm at callers.
    SCREAMING_SNAKE_CASE_ : torch.FloatTensor
class UpperCamelCase ( snake_case , snake_case ):
    """VQ-VAE model: an encoder, a vector-quantization bottleneck and a decoder.

    NOTE(review): this block was heavily mangled -- sub-modules were stored in
    locals instead of attributes, ``nn.Convad`` does not exist in torch, and
    every method was defined under one duplicate name (a SyntaxError) while
    the body called ``self.encode``/``self.decode``. Restored following the
    canonical diffusers ``VQModel`` layout; confirm the constructor-argument
    mapping against that reference.
    """

    @register_to_config
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=1,
        act_fn="silu",
        latent_channels=3,
        sample_size=32,
        num_vq_embeddings=256,
        norm_num_groups=32,
        vq_embed_dim=None,
        scaling_factor=0.18215,
        norm_type="group",
    ):
        super().__init__()

        # Sub-modules must be attributes (not locals) so nn.Module registers them.
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x, return_dict=True):
        """Encode ``x`` to (unquantized) latents; a plain tuple if ``return_dict`` is False."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h, force_not_quantize=False, return_dict=True):
        """Quantize ``h`` (unless ``force_not_quantize``) and decode it to a sample."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, _, _ = self.quantize(h)
        else:
            quant = h
        quant_post = self.post_quant_conv(quant)
        dec = self.decoder(quant_post, quant if self.config.norm_type == """spatial""" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample, return_dict=True):
        """Full autoencoding pass: encode, quantize and decode ``sample``."""
        h = self.encode(sample).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
| 600 | 0 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
# Fix: both constants were previously bound to the same throwaway name,
# leaving TRANSFORMERS_PATH / PATH_TO_TASK_GUIDES (used below) undefined.
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return ``(text, start_index, end_index, lines)`` for the span of
    *filename* strictly between the line starting with *start_prompt* and the
    line starting with *end_prompt*, with blank lines trimmed at both edges.

    Fix: the signature previously declared three parameters with the same
    name (a SyntaxError) and the body read undefined identifiers.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim blank lines on both edges of the span.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
# Fix: these three objects were previously bound to one throwaway name, so
# every reference below (transformers_module, TASK_GUIDE_TO_MODELS, ...)
# raised NameError.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    """Return a markdown-formatted, comma-separated list of the model links
    supported by *task_guide*, terminated by a newline.

    Fix: the function previously carried a mangled name while its caller used
    ``get_model_list_for_task``, and the body read undefined identifiers.
    """
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Sync the auto-generated model list inside *task_guide*.

    Raises ValueError when the list is stale and *overwrite* is False;
    rewrites the file in place when *overwrite* is True.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
                " to fix this."
            )
if __name__ == "__main__":
    # Fix: parser/args were previously bound to one throwaway name, so
    # ``parser.add_argument`` and ``args.fix_and_overwrite`` were undefined.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)


def is_automorphic_number(number):
    """Return True when *number* squared ends with *number* itself (automorphic).

    Raises TypeError for non-int input; negative numbers return False.
    """
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number and its square one by one.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


# Backward-compatible alias for the previous (mangled) name.
__lowerCAmelCase = is_automorphic_number

if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    """A simple dense matrix over Python numbers supporting elementwise ops,
    multiplication, transpose and the Sherman-Morrison inverse update.

    Fix: the original bound ``row``/``column``/``array`` to locals instead of
    instance attributes and mangled the method names that the rest of the
    file calls (``validate_indicies``, ``transpose``, ``sherman_morrison``).
    """

    def __init__(self, row, column, default_value=0):
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self):
        s = f"""Matrix consist of {self.row} rows and {self.column} columns\n"""

        # The widest element decides the column width of the printout.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"""%{max_element_length}s"""

        def single_line(row_vector):
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc):
        """True when *loc* is a valid (row, column) pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"""Unsupported type given for another ({type(another)})"""
            raise TypeError(msg)

    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Return (A + u v^T)^-1 given that ``self`` is A^-1; None if singular."""
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Backward-compatible alias for the previous (mangled) class name.
snake_case_ = Matrix
# Testing
if __name__ == "__main__":

    def test1():
        """Demonstrate the Sherman-Morrison update on the identity matrix.

        Fix: both helpers previously shared one mangled name, the final call
        targeted an undefined ``testa`` and every element write was bound to
        a throwaway local instead of the matrix.
        """
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"""a^(-1) is {ainv}""")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"""u is {u}""")
        print(f"""v is {v}""")
        print(f"""uv^T is {u * v.transpose()}""")
        # Sherman Morrison
        print(f"""(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}""")

    def test2():
        import doctest

        doctest.testmod()

    test2()
| 706 |
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class snake_case_( unittest.TestCase ):
    """Unit tests for the text-generation stopping criteria.

    Fix: all five test methods previously shared one mangled, non-``test_``
    name (so unittest discovered none of them and each def shadowed the
    previous one), and the helper referenced as ``self._get_tensors`` was
    never defined under that name.
    """

    def _get_tensors(self, length):
        """Build ``(input_ids, scores)`` fixtures of the requested sequence length."""
        batch_size = 3
        vocab_size = 2_5_0

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=1_0),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(1_0)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=1_0)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(1_0)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(1_0)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 1_0)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        # Backdating the start timestamp makes the criterion fire immediately.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0)]), 1_0)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0)]), 1_1)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 1_1)

        self.assertEqual(len(stopping_criteria), 1)
| 637 | 0 |
def harmonic_series(n_term: str) -> list:
    """Return the first *n_term* terms of the harmonic series as strings.

    Fix: the parameter was mangled while the body read an undefined
    ``n_term``, and the ``__main__`` block below calls ``harmonic_series``.

    >>> harmonic_series("3")
    ['1', '1/2', '1/3']
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        # The first term is "1"; every following term is "1/k".
        series.append(f"""1/{temp + 1}""" if series else "1")
    return series


# Backward-compatible alias for the previous (mangled) name.
__UpperCAmelCase = harmonic_series
if __name__ == "__main__":
    # Fix: the input() result was previously bound to a throwaway name,
    # leaving ``nth_term`` undefined on the next lines.
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 14 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

# pandas.read_csv kwargs grouped by the pandas version that introduced or
# deprecated them; used below to prune ``pd_read_csv_kwargs``.
# Fix: all five values were previously assigned to one clobbered name,
# leaving every identifier referenced later in this file undefined.
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV: mirrors the keyword arguments of ``pandas.read_csv``.

    Fix: every field was previously declared under one duplicate mangled name
    while the property below reads ``self.sep`` etc.; field names/types are
    restored following the datasets reference implementation (confirm order
    against it).
    """

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 1_0000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        """Apply the datasets-friendly aliases for pandas arguments."""
        # `delimiter` / `column_names` alias the pandas `sep` / `names` kwargs.
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        """The subset of this config forwarded to ``pandas.read_csv``,
        pruned according to the installed pandas version."""
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    """CSV dataset builder backed by ``pandas.read_csv`` and Arrow tables.

    Fix: all four methods previously shared one mangled name (only the last
    survived on the class) and the builder-config class attribute was lost.
    """

    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle str, list and dict ``data_files`` layouts."""
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        """Cast an Arrow table to the configured features' schema, if any."""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield ``((file_idx, batch_idx), pa.Table)`` pairs read in chunks."""
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                raise


# Backward-compatible alias for the previous (mangled) class name.
a__ = Csv
| 245 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def snake_case_ ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
lowercase__: int = cst_fwd.get(snake_case , np.inf )
lowercase__: Optional[Any] = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
lowercase__: int = new_cost_f
lowercase__: Dict = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
lowercase__: Any = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """Bidirectional Dijkstra between *source* and *destination*.

    Returns the shortest path distance, or -1 when no path exists.

    Fix: the signature previously declared four parameters with the same
    name (a SyntaxError) and every state variable was bound to one clobbered
    local.
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        # Expand one node on each frontier per iteration.
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Standard termination test for bidirectional Dijkstra.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


# Backward-compatible alias for the previous (mangled) name.
snake_case_ = bidirectional_dij
# Example graphs: edges are (neighbor, weight) pairs, stored once per
# direction so both Dijkstra frontiers can traverse them.
# Fix: both dicts were previously bound to the same throwaway name, so the
# second assignment clobbered the first and neither graph was usable.
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 335 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
# Module-level logger; not referenced elsewhere in this visible chunk.
__lowerCAmelCase = logging.get_logger(__name__)
class __a ( __UpperCamelCase ):
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> None:
'''simple docstring'''
warnings.warn(
'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PerceiverImageProcessor instead.' , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 335 | 1 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Return the built-in voltage (in volts) of a p-n junction.

    V_bi = (kT / q) * ln(N_d * N_a / n_i^2)

    Fix: the three parameters previously shared one name (a SyntaxError) and
    the temperature constant was bound to a throwaway identifier, leaving
    ``T`` undefined.

    Raises ValueError when any concentration is non-positive or when a doping
    concentration does not exceed the intrinsic concentration.
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 489 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    """Helper holding the expected BridgeTower image-processor settings and
    computing the output sizes the processor should produce.

    Fix: the constructor previously declared many parameters under one
    duplicate name (a SyntaxError), and the class plus both helper methods
    carried mangled names while call sites use
    ``BridgeTowerImageProcessingTester`` / ``prepare_image_processor_dict`` /
    ``get_expected_values``.
    """

    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 3_2,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_5_5,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=3_0,
        max_resolution=4_0_0,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 2_8_8}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        """Keyword arguments used to build the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor is expected to output.

        Mirrors the processor's shortest-edge resize (capped at 1333/800 of
        the target size) followed by rounding down to a multiple of
        ``size_divisor``. When *batched*, returns the per-batch maxima.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1_3_3_3 / 8_0_0) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


# Backward-compatible alias for the previous (mangled) class name.
UpperCamelCase__ = BridgeTowerImageProcessingTester
@require_torch
@require_vision
class UpperCamelCase__(__lowerCAmelCase, unittest.TestCase):
    """Tests BridgeTowerImageProcessor on PIL, numpy and torch inputs."""

    # Class under test; None when the vision backend is unavailable.
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        """Create the shared settings helper read by every test."""
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """Every expected configuration attribute exists on the processor."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        # Shape behaviour is covered by the per-input-type tests below.
        pass

    def _check_call(self, image_inputs):
        """Run the processor unbatched and batched; verify the output shapes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # Test not batched input.
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched.
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pil(self):
        """Random PIL images of unequal resolution are resized and padded."""
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        self._check_call(image_inputs)

    def test_call_numpy(self):
        """Random numpy arrays are accepted and produce the same shapes."""
        image_inputs = prepare_image_inputs(
            self.image_processor_tester, equal_resolution=False, numpify=True
        )
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        self._check_call(image_inputs)

    def test_call_pytorch(self):
        """Random torch tensors are accepted and produce the same shapes."""
        image_inputs = prepare_image_inputs(
            self.image_processor_tester, equal_resolution=False, torchify=True
        )
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        self._check_call(image_inputs)
| 489 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: the heavy torch-backed modules are only imported on
# first attribute access via _LazyModule below.
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711 |
"""simple docstring"""
def permute(nums):
    """Return all permutations of ``nums`` (pop / recurse / re-append strategy).

    ``nums`` is mutated during the computation but restored to its original
    order before returning.  An empty input yields an empty result.
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        # Permutations of the remaining elements, each extended with n.
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)  # rotate n back so the next iteration pops a new head
    return result
def permutea(nums):
    """Return all permutations of ``nums`` via in-place swap backtracking.

    Each recursion level fixes position ``start`` by swapping every later
    element into it, recursing, then undoing the swap, so ``nums`` is left
    unchanged on return.
    """
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
    import doctest

    # Print the permutations produced by the backtracking variant.
    res = permutea([1, 2, 3])
    print(res)
    doctest.testmod()
| 194 | 0 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
# The functions below log through this module-level logger.
logger = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE__ = logger  # backward-compatible alias for the old name

logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    """Guess the model family from a checkpoint name.

    Returns "rag_token", "rag_sequence", "bart", or None when the name gives
    no hint.  Checked in that order, so "token" wins over "sequence".
    """
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Return the best ``metric_fn(prediction, gt)`` over all ground truths."""
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    """Compute and log exact-match and F1 of predictions against gold answers.

    ``preds_path`` holds one prediction per line.  Gold data is either a TSV
    of "question \\t python-literal answer list" (``gold_data_mode == "qa"``)
    or one answer string per line.
    """
    with open(preds_path, "r") as f:
        hypos = [line.strip() for line in f.readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        with open(gold_data_path, "r") as f:
            references = [line.strip() for line in f.readlines()]
        answers = [[reference] for reference in references]

    fa = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        fa += metric_max_over_ground_truths(fa_score, prediction, ground_truths)

    em = 100.0 * em / total
    fa = 100.0 * fa / total

    logger.info(f"F1: {fa:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    """Compute and log precision@k of retrieved provenance vs gold provenance.

    Each line of both files is a tab-separated list of document titles; only
    the first ``args.k`` predicted titles count.
    """
    k = args.k
    with open(preds_path, "r") as f:
        hypos = [line.strip() for line in f.readlines()]
    with open(gold_data_path, "r") as f:
        references = [line.strip() for line in f.readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    """Retrieve documents for each question; return tab-joined page titles."""
    def strip_title(title):
        # Titles stored in the index may be wrapped in literal double quotes.
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_eae(args, rag_model, questions):
    """Generate one answer per question with the model; return decoded strings."""
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )
        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # forbid generating a repeated BOS token
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    """Parse command-line arguments for RAG evaluation and pick the device."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    """Evaluate each checkpoint with the configured scoring / generation mode.

    Picks the model class from ``args.model_type`` (inferred from the name if
    unset), runs batched generation or retrieval over the evaluation set, and
    scores the written predictions file.
    """
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
    main(args)
| 631 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()  # referenced via `global process_lock` in the worker
SCREAMING_SNAKE_CASE__ = process_lock  # backward-compatible alias for the old name
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker holding one list element in odd-even transposition sort.

    On even/odd phases it exchanges ``value`` with the right/left neighbour
    through the given pipes (keeping the min on the left, the max on the
    right) and finally reports the settled value via ``result_pipe``.
    A ``None`` pipe means no neighbour on that side.
    """
    global process_lock

    # We perform n swaps since after n swaps we know we are sorted.  We *could*
    # stop early if we are sorted already, but it takes as long to find out we
    # are sorted as it does to sort the list with this algorithm.
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # Send your value to your right neighbor.
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # Receive your right neighbor's value.
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # Take the lower value since you are on the left.
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # Send your value to your left neighbor.
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # Receive your left neighbor's value.
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # Take the higher value since you are on the right.
            value = max(value, temp)
    # After all swaps are performed, send the values back to main.
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sort ``arr`` ascending using one process per element.

    Spawns an ``oe_process`` worker per element, wired to its neighbours by
    pipes, then collects the settled values back into ``arr`` and returns it.
    """
    process_array_ = []
    result_pipe = []
    # Initialize the list of pipes where the values will be retrieved.
    for _ in arr:
        result_pipe.append(Pipe())

    # Creates the processes.  The first and last process only have one
    # neighbor, so they are made outside of the loop.
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # Start the processes.
    for p in process_array_:
        p.start()

    # Wait for the processes to end and write their values to the list.
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    """Demo: sort a reversed list of 10 elements and print before/after."""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
| 631 | 1 |
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class _lowerCamelCase(Pipeline):
    """Video classification pipeline.

    Assigns labels to a video given as a local path or a URL; frames are
    decoded with decord and fed to the model's image processor.  Method names
    (`_sanitize_parameters`, `preprocess`, `_forward`, `postprocess`) are
    required by the `Pipeline` base class contract.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        """Route pipeline kwargs to the preprocess / forward / postprocess steps."""
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        """Classify the video(s); returns a list of {"score", "label"} dicts."""
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        """Decode ``num_frames`` evenly spaced frames and run the image processor."""
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith('http://') or video.startswith('https://'):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Convert logits into the ``top_k`` highest-scoring label dicts."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''')

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 704 | """simple docstring"""
from __future__ import annotations
# Candidate moves, in the order the action map encodes them.
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
SCREAMING_SNAKE_CASE = DIRECTIONS  # backward-compatible alias for the old name


def search(grid, init, goal, cost, heuristic):
    """A* search on a 0/1 occupancy grid (1 = obstacle).

    Returns ``(path, action)`` where ``path`` is the list of [row, col] cells
    from ``init`` to ``goal`` and ``action`` records, per cell, the index of
    the DIRECTIONS move used to reach it.  Raises ValueError when no path
    exists.
    """
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    # Walk the action map backwards from the goal to reconstruct the path.
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # All coordinates are given in format [y, x].
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # The cost map which pushes the path closer to the goal.
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # Added extra penalty in the heuristic map.
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)  # used by the feature extractor's warnings
_lowerCamelCase = logger  # backward-compatible alias for the old name
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Optional[Any] =["input_features", "is_longer"]
def __init__(
    self,
    feature_size=64,
    sampling_rate=48_000,
    hop_length=480,
    max_length_s=10,
    fft_window_size=1024,
    padding_value=0.0,
    return_attention_mask=False,
    frequency_min=0,
    frequency_max=14_000,
    top_db=None,
    truncation="fusion",
    padding="repeatpad",
    **kwargs,
):
    """CLAP feature extractor configuration.

    Precomputes two mel filter banks ("htk" for the fused view, "slaney" for
    the plain view) sized from the FFT window and frequency range.
    """
    super().__init__(
        feature_size=feature_size,
        sampling_rate=sampling_rate,
        padding_value=padding_value,
        return_attention_mask=return_attention_mask,
        **kwargs,
    )
    self.top_db = top_db
    self.truncation = truncation
    self.padding = padding
    self.fft_window_size = fft_window_size
    # Number of positive-frequency bins of a real FFT of this window size.
    self.nb_frequency_bins = (fft_window_size >> 1) + 1
    self.hop_length = hop_length
    self.max_length_s = max_length_s
    self.nb_max_samples = max_length_s * sampling_rate
    self.sampling_rate = sampling_rate
    self.frequency_min = frequency_min
    self.frequency_max = frequency_max
    self.mel_filters = mel_filter_bank(
        num_frequency_bins=self.nb_frequency_bins,
        num_mel_filters=feature_size,
        min_frequency=frequency_min,
        max_frequency=frequency_max,
        sampling_rate=sampling_rate,
        norm=None,
        mel_scale="htk",
    )
    self.mel_filters_slaney = mel_filter_bank(
        num_frequency_bins=self.nb_frequency_bins,
        num_mel_filters=feature_size,
        min_frequency=frequency_min,
        max_frequency=frequency_max,
        sampling_rate=sampling_rate,
        norm="slaney",
        mel_scale="slaney",
    )
def to_dict(self):
    """Serialize to a dict, dropping the large (re-derivable) mel filter banks."""
    output = copy.deepcopy(self.__dict__)
    output["feature_extractor_type"] = self.__class__.__name__
    if "mel_filters" in output:
        del output["mel_filters"]
    if "mel_filters_slaney" in output:
        del output["mel_filters_slaney"]
    return output
def _np_extract_fbank_features(self, waveform, mel_filters=None):
    """Compute a (frames, mels) log-mel spectrogram in dB for one waveform."""
    log_mel_spectrogram = spectrogram(
        waveform,
        window_function(self.fft_window_size, "hann"),
        frame_length=self.fft_window_size,
        hop_length=self.hop_length,
        power=2.0,
        mel_filters=mel_filters,
        log_mel="dB",
    )
    # spectrogram() returns (mels, frames); transpose to frames-major.
    return log_mel_spectrogram.T
def _random_mel_fusion(self, mel, total_frames, chunk_frames):
    """Build a 4-channel "fusion" mel: a shrunk global view plus 3 random chunks.

    The valid chunk-start range is split into thirds so the front / middle /
    back chunks come from different regions of the clip.
    """
    ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
    if len(ranges[1]) == 0:
        # If the audio is too short, we just use the first chunk.
        ranges[1] = [0]
    if len(ranges[2]) == 0:
        # If the audio is too short, we just use the first chunk.
        ranges[2] = [0]
    # Randomly choose a start index for each part.
    idx_front = np.random.choice(ranges[0])
    idx_middle = np.random.choice(ranges[1])
    idx_back = np.random.choice(ranges[2])

    mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
    mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
    mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

    # Downsample the full mel to chunk size for the global view.
    mel = torch.tensor(mel[None, None, :])
    mel_shrink = torch.nn.functional.interpolate(
        mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
    )
    mel_shrink = mel_shrink[0][0].numpy()
    mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
    return mel_fusion
def _get_input_mel(self, waveform, max_length, truncation, padding):
    """Mel-ize one waveform, truncating or padding it to ``max_length`` samples.

    Returns ``(input_mel, longer)`` where ``longer`` indicates whether the
    random-fusion path was taken for an over-long input.
    """
    if waveform.shape[0] > max_length:
        if truncation == "rand_trunc":
            longer = True
            # Random crop to max_length (for compatibility) -> this should be handled by self.pad.
            overflow = len(waveform) - max_length
            idx = np.random.randint(0, overflow + 1)
            waveform = waveform[idx : idx + max_length]
            input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        elif truncation == "fusion":
            mel = self._np_extract_fbank_features(waveform, self.mel_filters)
            chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
            total_frames = mel.shape[0]
            if chunk_frames == total_frames:
                # There is a corner case where the audio length is larger than max_length but
                # smaller than max_length + hop_length.  In this case, we just use the whole audio.
                input_mel = np.stack([mel, mel, mel, mel], axis=0)
                longer = False
            else:
                input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                longer = True
        else:
            raise NotImplementedError(f'''data_truncating {truncation} not implemented''')
    else:
        longer = False
        # Only use repeat as a new possible value for padding: repeat the audio
        # before applying the usual max_length zero padding.
        if waveform.shape[0] < max_length:
            if padding == "repeat":
                n_repeat = int(max_length / len(waveform))
                waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
            if padding == "repeatpad":
                n_repeat = int(max_length / len(waveform))
                waveform = np.stack(np.tile(waveform, n_repeat))
            waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

        if truncation == "fusion":
            input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
            input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
        else:
            input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

    return input_mel, longer
def __call__( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Union[str, Any] = truncation if truncation is not None else self.truncation
UpperCAmelCase_ : Tuple = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
UpperCAmelCase_ : Tuple = isinstance(_snake_case ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
UpperCAmelCase_ : Optional[int] = is_batched_numpy or (
isinstance(_snake_case ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase_ : List[Any] = [np.asarray(_snake_case ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_snake_case ,np.ndarray ):
UpperCAmelCase_ : Union[str, Any] = np.asarray(_snake_case ,dtype=np.floataa )
elif isinstance(_snake_case ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase_ : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase_ : int = [np.asarray(_snake_case )]
# convert to mel spectrogram, truncate and pad if needed.
UpperCAmelCase_ : Union[str, Any] = [
self._get_input_mel(_snake_case ,max_length if max_length else self.nb_max_samples ,_snake_case ,_snake_case )
for waveform in raw_speech
]
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Tuple = []
for mel, longer in padded_inputs:
input_mel.append(_snake_case )
is_longer.append(_snake_case )
if truncation == "fusion" and sum(_snake_case ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
UpperCAmelCase_ : Any = np.random.randint(0 ,len(_snake_case ) )
UpperCAmelCase_ : Optional[Any] = True
if isinstance(input_mel[0] ,_snake_case ):
UpperCAmelCase_ : Dict = [np.asarray(_snake_case ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
UpperCAmelCase_ : Any = [[longer] for longer in is_longer]
UpperCAmelCase_ : Optional[Any] = {"input_features": input_mel, "is_longer": is_longer}
UpperCAmelCase_ : int = BatchFeature(_snake_case )
if return_tensors is not None:
UpperCAmelCase_ : str = input_features.convert_to_tensors(_snake_case )
return input_features
| 71 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class _snake_case :
    """Builds tiny Blenderbot configs and model inputs for the TF tests below."""

    # Knobs read by ``prepare_config_and_inputs_for_common``; subclasses may
    # override ``config_updates`` to tweak the generated config.
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a (config, inputs_dict) pair with an EOS appended to input_ids."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Verify the decoder produces identical logits with and without KV cache."""
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1E-3)


# The test class below instantiates this helper via the conventional name.
TFBlenderbotModelTester = _snake_case
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard Blenderbot input dict, defaulting every mask.

    ``attention_mask``/``decoder_attention_mask`` default to "not pad_token";
    the first decoder position is always attended. The head masks default to
    all-ones tensors shaped (layers, heads) from ``config``.
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


# Backward-compatible alias for the obfuscated original name.
a__ = prepare_blenderbot_inputs_dict
@require_tf
class _snake_case (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    """Common TF model/pipeline tests for Blenderbot."""

    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class _snake_case (unittest.TestCase):
    """Slow integration test that checks an exact generation from the 400M model."""

    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        # Downloaded lazily and cached for the lifetime of the test instance.
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 71 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowercase__ ( BaseOutput ):
    """Output of the prior transformer defined below.

    Attributes:
        predicted_image_embedding (`torch.FloatTensor`): the predicted CLIP
            image embedding produced by the model's forward pass.
    """

    predicted_image_embedding: torch.FloatTensor


# The transformer's forward pass constructs this class under the name
# ``PriorTransformerOutput``; keep that name bound as well.
PriorTransformerOutput = lowercase__
class lowercase__ ( __lowerCamelCase , __lowerCamelCase ):
    """A prior-style transformer: given a noisy embedding and a timestep (plus
    optional encoder hidden states), it predicts a CLIP image embedding and
    wraps it in ``PriorTransformerOutput``.

    NOTE(review): an automated renaming pass has replaced every assignment
    target with ``UpperCamelCase__`` and every parameter with ``__magic_name__``.
    As written, the ``__init__``/forward parameter lists contain duplicate names
    (a SyntaxError), and none of the ``self.*`` sub-modules the forward pass
    reads (``self.time_proj``, ``self.time_embedding``, ``self.proj_in``, …) are
    ever actually assigned here. The intended names are recoverable from the
    right-hand-side references; the code is kept byte-identical pending that
    restoration.
    """
    @register_to_config
    def __init__( self, __magic_name__ = 32, __magic_name__ = 64, __magic_name__ = 20, __magic_name__ = 768, __magic_name__=77, __magic_name__=4, __magic_name__ = 0.0, __magic_name__ = "silu", __magic_name__ = None, __magic_name__ = None, __magic_name__ = "linear", __magic_name__ = "prd", __magic_name__ = None, __magic_name__ = None, __magic_name__ = None, ) -> None:
        """Build the time/embedding projections, positional + "prd" embeddings,
        the transformer block stack, and the output projection.

        The body refers to the parameters as ``num_attention_heads`` (32),
        ``attention_head_dim`` (64), ``num_layers`` (20), ``embedding_dim`` (768),
        ``num_embeddings`` (77), ``additional_embeddings`` (4), ``dropout``,
        ``time_embed_act_fn``, ``norm_in_type``, ``embedding_proj_norm_type``,
        ``encoder_hid_proj_type``, ``added_emb_type`` and three optional dims.
        """
        super().__init__()
        # inner_dim = heads * head_dim; the optional *_dim parameters fall back
        # to inner_dim / embedding_dim when unset.
        UpperCamelCase__ : Optional[int] = num_attention_heads
        UpperCamelCase__ : List[str] = attention_head_dim
        UpperCamelCase__ : Union[str, Any] = num_attention_heads * attention_head_dim
        UpperCamelCase__ : str = additional_embeddings
        UpperCamelCase__ : Any = time_embed_dim or inner_dim
        UpperCamelCase__ : str = embedding_proj_dim or embedding_dim
        UpperCamelCase__ : Union[str, Any] = clip_embed_dim or embedding_dim
        # Sinusoidal timestep projection followed by an MLP time embedding.
        UpperCamelCase__ : Union[str, Any] = Timesteps(__magic_name__, __magic_name__, 0 )
        UpperCamelCase__ : Optional[Any] = TimestepEmbedding(__magic_name__, __magic_name__, out_dim=__magic_name__, act_fn=__magic_name__ )
        UpperCamelCase__ : Union[str, Any] = nn.Linear(__magic_name__, __magic_name__ )
        # Optional LayerNorm in front of the embedding projection.
        if embedding_proj_norm_type is None:
            UpperCamelCase__ : List[str] = None
        elif embedding_proj_norm_type == "layer":
            UpperCamelCase__ : Tuple = nn.LayerNorm(__magic_name__ )
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" )
        UpperCamelCase__ : Dict = nn.Linear(__magic_name__, __magic_name__ )
        # Optional linear projection for the encoder hidden states.
        if encoder_hid_proj_type is None:
            UpperCamelCase__ : int = None
        elif encoder_hid_proj_type == "linear":
            UpperCamelCase__ : Any = nn.Linear(__magic_name__, __magic_name__ )
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" )
        # Learned positional embedding covering all (num_embeddings + additional) tokens.
        UpperCamelCase__ : List[Any] = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, __magic_name__ ) )
        # Optional learned "prd" token appended at the end of the sequence.
        if added_emb_type == "prd":
            UpperCamelCase__ : List[str] = nn.Parameter(torch.zeros(1, 1, __magic_name__ ) )
        elif added_emb_type is None:
            UpperCamelCase__ : Optional[int] = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." )
        # Transformer trunk: one BasicTransformerBlock per layer, gelu activation.
        UpperCamelCase__ : Union[str, Any] = nn.ModuleList(
            [
                BasicTransformerBlock(
                    __magic_name__, __magic_name__, __magic_name__, dropout=__magic_name__, activation_fn='''gelu''', attention_bias=__magic_name__, )
                for d in range(__magic_name__ )
            ] )
        # Optional input LayerNorm; mandatory output LayerNorm + CLIP projection head.
        if norm_in_type == "layer":
            UpperCamelCase__ : str = nn.LayerNorm(__magic_name__ )
        elif norm_in_type is None:
            UpperCamelCase__ : str = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}." )
        UpperCamelCase__ : Optional[Any] = nn.LayerNorm(__magic_name__ )
        UpperCamelCase__ : int = nn.Linear(__magic_name__, __magic_name__ )
        # Additive causal mask: -10000.0 strictly above the diagonal forbids
        # attending to future positions.
        UpperCamelCase__ : Optional[Any] = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -1_0000.0 )
        causal_attention_mask.triu_(1 )
        UpperCamelCase__ : List[Any] = causal_attention_mask[None, ...]
        self.register_buffer('''causal_attention_mask''', __magic_name__, persistent=__magic_name__ )
        # Statistics used by the latent post-processing method to un-normalize
        # CLIP latents.
        UpperCamelCase__ : Union[str, Any] = nn.Parameter(torch.zeros(1, __magic_name__ ) )
        UpperCamelCase__ : List[str] = nn.Parameter(torch.zeros(1, __magic_name__ ) )
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def UpperCamelCase__ ( self ) -> Dict[str, AttentionProcessor]:
        """Return all attention processors of the model, keyed by module path."""
        UpperCamelCase__ : Union[str, Any] = {}
        def fn_recursive_add_processors(__magic_name__, __magic_name__, __magic_name__ ):
            # Any sub-module exposing ``set_processor`` contributes its processor.
            if hasattr(__magic_name__, '''set_processor''' ):
                UpperCamelCase__ : Optional[int] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", __magic_name__, __magic_name__ )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(__magic_name__, __magic_name__, __magic_name__ )
        return processors
    def UpperCamelCase__ ( self, __magic_name__ ) -> None:
        """Set the attention processor(s).

        Accepts either a single processor (applied everywhere) or a dict keyed
        like :attr:`attn_processors`; a dict must contain exactly one entry per
        attention layer, otherwise a ValueError is raised.
        """
        UpperCamelCase__ : Tuple = len(self.attn_processors.keys() )
        if isinstance(__magic_name__, __magic_name__ ) and len(__magic_name__ ) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(__magic_name__ )} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
        def fn_recursive_attn_processor(__magic_name__, __magic_name__, __magic_name__ ):
            if hasattr(__magic_name__, '''set_processor''' ):
                # A bare processor is applied as-is; a dict is popped per module path.
                if not isinstance(__magic_name__, __magic_name__ ):
                    module.set_processor(__magic_name__ )
                else:
                    module.set_processor(processor.pop(f"{name}.processor" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", __magic_name__, __magic_name__ )
        for name, module in self.named_children():
            fn_recursive_attn_processor(__magic_name__, __magic_name__, __magic_name__ )
    def UpperCamelCase__ ( self ) -> None:
        """Reset every attention layer to the default ``AttnProcessor``."""
        self.set_attn_processor(AttnProcessor() )
    def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__ = None, __magic_name__ = None, __magic_name__ = True, ) -> List[Any]:
        """Forward pass: embed the timestep and projections, assemble the token
        sequence, run the transformer stack, and project the final hidden state
        to a predicted image embedding.

        NOTE(review): the body reads ``hidden_states``, ``timestep``, the
        projected embedding (as ``proj_embeddings``), ``encoder_hidden_states``,
        ``attention_mask`` and ``return_dict`` — i.e. the six (corrupted)
        parameters above, in that order.
        """
        UpperCamelCase__ : Optional[int] = hidden_states.shape[0]
        UpperCamelCase__ : Optional[Any] = timestep
        # Normalize ``timestep`` to a 1-D tensor on the input's device.
        if not torch.is_tensor(__magic_name__ ):
            UpperCamelCase__ : Tuple = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device )
        elif torch.is_tensor(__magic_name__ ) and len(timesteps.shape ) == 0:
            UpperCamelCase__ : List[str] = timesteps[None].to(hidden_states.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        UpperCamelCase__ : int = timesteps * torch.ones(__magic_name__, dtype=timesteps.dtype, device=timesteps.device )
        UpperCamelCase__ : Optional[Any] = self.time_proj(__magic_name__ )
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        UpperCamelCase__ : List[Any] = timesteps_projected.to(dtype=self.dtype )
        UpperCamelCase__ : Union[str, Any] = self.time_embedding(__magic_name__ )
        if self.embedding_proj_norm is not None:
            UpperCamelCase__ : List[Any] = self.embedding_proj_norm(__magic_name__ )
        UpperCamelCase__ : int = self.embedding_proj(__magic_name__ )
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            UpperCamelCase__ : Tuple = self.encoder_hidden_states_proj(__magic_name__ )
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
        UpperCamelCase__ : Tuple = self.proj_in(__magic_name__ )
        UpperCamelCase__ : List[Any] = self.positional_embedding.to(hidden_states.dtype )
        # Assemble the token sequence:
        #   [encoder states?] + proj embedding + time embedding + hidden states (+ prd token)
        UpperCamelCase__ : str = []
        UpperCamelCase__ : Any = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(__magic_name__ )
            additional_embeddings_len += encoder_hidden_states.shape[1]
        # Promote 2-D inputs to (batch, 1, dim).
        if len(proj_embeddings.shape ) == 2:
            UpperCamelCase__ : Dict = proj_embeddings[:, None, :]
        if len(hidden_states.shape ) == 2:
            UpperCamelCase__ : Optional[int] = hidden_states[:, None, :]
        UpperCamelCase__ : List[Any] = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            UpperCamelCase__ : str = self.prd_embedding.to(hidden_states.dtype ).expand(__magic_name__, -1, -1 )
            additional_embeds.append(__magic_name__ )
        UpperCamelCase__ : Dict = torch.cat(
            __magic_name__, dim=1, )
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        UpperCamelCase__ : int = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            UpperCamelCase__ : Union[str, Any] = F.pad(
                __magic_name__, (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ), value=0.0, )
        UpperCamelCase__ : Union[str, Any] = hidden_states + positional_embeddings
        if attention_mask is not None:
            # Convert the 0/1 mask to an additive bias, pad for the extra tokens,
            # add the causal mask, then repeat once per attention head.
            UpperCamelCase__ : List[str] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
            UpperCamelCase__ : Union[str, Any] = F.pad(__magic_name__, (0, self.additional_embeddings), value=0.0 )
            UpperCamelCase__ : str = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            UpperCamelCase__ : Optional[int] = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0 )
        if self.norm_in is not None:
            UpperCamelCase__ : Optional[int] = self.norm_in(__magic_name__ )
        for block in self.transformer_blocks:
            UpperCamelCase__ : Optional[int] = block(__magic_name__, attention_mask=__magic_name__ )
        UpperCamelCase__ : List[Any] = self.norm_out(__magic_name__ )
        # With a prd token, the prediction is read from the last position;
        # otherwise from everything after the prefix embeddings.
        if self.prd_embedding is not None:
            UpperCamelCase__ : Union[str, Any] = hidden_states[:, -1]
        else:
            UpperCamelCase__ : Optional[Any] = hidden_states[:, additional_embeddings_len:]
        UpperCamelCase__ : Optional[int] = self.proj_to_clip_embeddings(__magic_name__ )
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=__magic_name__ )
    def UpperCamelCase__ ( self, __magic_name__ ) -> List[str]:
        """Un-normalize prior latents using the stored CLIP mean/std statistics."""
        UpperCamelCase__ : Dict = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 369 |
from manim import *
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Any = Rectangle(height=0.5, width=0.5 )
UpperCamelCase__ : Any = Rectangle(height=0.25, width=0.25 )
UpperCamelCase__ : Dict = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
UpperCamelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCamelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCamelCase__ : str = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 )
UpperCamelCase__ : List[str] = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 )
UpperCamelCase__ : Tuple = VGroup(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0 )
UpperCamelCase__ : List[str] = Text('''CPU''', font_size=24 )
UpperCamelCase__ : Any = Group(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0.5, aligned_edge=__magic_name__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__magic_name__ )
UpperCamelCase__ : int = [mem.copy() for i in range(4 )]
UpperCamelCase__ : str = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 )
UpperCamelCase__ : Tuple = Text('''GPU''', font_size=24 )
UpperCamelCase__ : str = Group(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0.5, aligned_edge=__magic_name__ )
gpu.move_to([-1, -1, 0] )
self.add(__magic_name__ )
UpperCamelCase__ : str = [mem.copy() for i in range(6 )]
UpperCamelCase__ : Tuple = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 )
UpperCamelCase__ : Tuple = Text('''Model''', font_size=24 )
UpperCamelCase__ : Tuple = Group(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0.5, aligned_edge=__magic_name__ )
model.move_to([3, -1.0, 0] )
self.add(__magic_name__ )
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : Any = []
UpperCamelCase__ : List[Any] = []
for i, rect in enumerate(__magic_name__ ):
rect.set_stroke(__magic_name__ )
UpperCamelCase__ : Tuple = Rectangle(height=0.46 / 4, width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__magic_name__, opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=__magic_name__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0], direction=__magic_name__, buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1], direction=__magic_name__, buff=0.0 )
self.add(__magic_name__ )
model_cpu_arr.append(__magic_name__ )
self.add(*__magic_name__, *__magic_name__, *__magic_name__ )
UpperCamelCase__ : Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase__ : Dict = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 )
UpperCamelCase__ : List[str] = Text('''Loaded Checkpoint''', font_size=24 )
UpperCamelCase__ : Optional[Any] = Group(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0.5, aligned_edge=__magic_name__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(__magic_name__ )
UpperCamelCase__ : Tuple = []
UpperCamelCase__ : List[str] = []
for i, rect in enumerate(__magic_name__ ):
UpperCamelCase__ : Optional[int] = fill.copy().set_fill(__magic_name__, opacity=0.7 )
target.move_to(__magic_name__ )
ckpt_arr.append(__magic_name__ )
UpperCamelCase__ : int = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__magic_name__ )
self.add(*__magic_name__, *__magic_name__ )
UpperCamelCase__ : List[str] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase__ : List[str] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, )
key_text.move_to([-5, 2.4, 0] )
self.add(__magic_name__, __magic_name__ )
UpperCamelCase__ : Any = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18, )
blue_text.next_to(__magic_name__, DOWN * 2.4, aligned_edge=key_text.get_left() )
self.add(__magic_name__ )
UpperCamelCase__ : Dict = MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.", font_size=24, )
step_a.move_to([2, 2, 0] )
UpperCamelCase__ : Any = [meta_mem.copy() for i in range(6 )]
UpperCamelCase__ : int = [meta_mem.copy() for i in range(6 )]
UpperCamelCase__ : Optional[int] = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 )
UpperCamelCase__ : Union[str, Any] = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 )
UpperCamelCase__ : Union[str, Any] = VGroup(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0 )
UpperCamelCase__ : Any = Text('''Disk''', font_size=24 )
UpperCamelCase__ : List[Any] = Group(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0.5, aligned_edge=__magic_name__ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__magic_name__, run_time=3 ), Write(__magic_name__, run_time=1 ), Create(__magic_name__, run_time=1 ) )
UpperCamelCase__ : Union[str, Any] = []
for i, rect in enumerate(__magic_name__ ):
UpperCamelCase__ : Union[str, Any] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__magic_name__, run_time=1.5 ) )
self.play(*__magic_name__ )
self.play(FadeOut(__magic_name__ ) )
UpperCamelCase__ : Optional[int] = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__magic_name__, run_time=3 ) )
self.play(
FadeOut(__magic_name__, __magic_name__, *__magic_name__, *__magic_name__ ), )
self.wait()
| 369 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : int = {}
class A__ ( PretrainedConfig ):
    """Configuration for LLaMA models.

    Stores the standard LLaMA hyper-parameters (vocab/hidden/intermediate sizes,
    layer and head counts, RoPE scaling, …) and validates the optional
    ``rope_scaling`` dict, which must contain exactly the keys ``type``
    (``"linear"`` or ``"dynamic"``) and ``factor`` (a float > 1).
    """

    model_type = 'llama'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1E-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility: older checkpoints used multi-head (not
        # grouped-query) attention, i.e. one KV head per attention head.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate ``self.rope_scaling``; no-op when it is ``None``.

        Raises:
            ValueError: if it is not a two-field dict, if ``type`` is not one of
                ``linear``/``dynamic``, or if ``factor`` is not a float > 1.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            # NOTE: the message names the keys the code actually reads below.
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 37 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order low-pass IIR filter (RBJ Audio-EQ-Cookbook biquad).

    Args:
        frequency: cutoff frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: resonance/quality factor; the default 1/sqrt(2) gives a
            maximally-flat (Butterworth) response.

    Returns:
        A configured second-order ``IIRFilter``.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # For a low-pass biquad, b2 == b0, so b0 is reused as the last feed-forward coefficient.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order high-pass IIR filter (RBJ Audio-EQ-Cookbook biquad).

    Args:
        frequency: cutoff frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: resonance/quality factor (default Butterworth, 1/sqrt(2)).

    Returns:
        A configured second-order ``IIRFilter``.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # For a high-pass biquad, b2 == b0 as well.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order band-pass IIR filter (RBJ Audio-EQ-Cookbook biquad,
    constant-skirt-gain variant: peak gain = Q).

    Args:
        frequency: center frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor controlling the bandwidth.

    Returns:
        A configured second-order ``IIRFilter``.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order all-pass IIR filter (RBJ Audio-EQ-Cookbook biquad):
    unity magnitude response, frequency-dependent phase shift around ``frequency``.

    Args:
        frequency: center frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor.

    Returns:
        A configured second-order ``IIRFilter``.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # All-pass: feed-forward coefficients are the feed-back coefficients reversed.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)):
    """
    Build a 2nd-order peaking-EQ IIR filter (RBJ Audio-EQ cookbook biquad).

    :param frequency: center frequency in Hz
    :param samplerate: sampling rate in Hz
    :param gain_db: boost/cut at the center frequency, in dB
    :param q_factor: quality factor; defaults to 1/sqrt(2)
    :return: an ``IIRFilter`` of order 2 with the peaking-EQ coefficients set
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    # Linear amplitude from the dB gain (cookbook convention: 10^(dB/40)).
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)):
    """
    Build a 2nd-order low-shelf IIR filter (RBJ Audio-EQ cookbook biquad).

    :param frequency: shelf corner frequency in Hz
    :param samplerate: sampling rate in Hz
    :param gain_db: shelf gain in dB (positive boosts lows, negative cuts)
    :param q_factor: quality factor; defaults to 1/sqrt(2)
    :return: an ``IIRFilter`` of order 2 with the low-shelf coefficients set
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Cookbook intermediate terms: (A±1) ∓ (A∓1)·cos(w0).
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)):
    """
    Build a 2nd-order high-shelf IIR filter (RBJ Audio-EQ cookbook biquad).

    :param frequency: shelf corner frequency in Hz
    :param samplerate: sampling rate in Hz
    :param gain_db: shelf gain in dB (positive boosts highs, negative cuts)
    :param q_factor: quality factor; defaults to 1/sqrt(2)
    :return: an ``IIRFilter`` of order 2 with the high-shelf coefficients set
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Cookbook intermediate terms: (A±1) ∓ (A∓1)·cos(w0).
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 507 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps submodule name -> public names it provides.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

# The modeling module can only be imported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the eager imports.
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so submodules are
    # imported only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D array into a column vector of shape ``(n, 1)``."""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """
    Return the within-class scatter matrix of ``features``.

    ``features`` has shape (n_features, n_samples); ``labels[j]`` in
    ``range(classes)`` assigns column j to a class.
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not np.nan
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """
    Return the between-class scatter matrix of ``features``.

    Each class contributes the outer product of (class mean - global mean),
    weighted by the number of samples in that class.
    """
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples belonging to class i
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not np.nan
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """
    Project ``features`` (n_features, n_samples) onto its first ``dimensions``
    principal components and return the (dimensions, n_samples) projection.

    Raises AssertionError if the dataset is empty (all-zero / unloaded).
    """
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        # `dimensions` of them (eigh returns eigenvalues in ascending order).
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """
    Project ``features`` (n_features, n_samples) onto ``dimensions`` linear
    discriminants obtained from the generalized eigenproblem
    S_b v = lambda S_w v.

    Raises AssertionError if ``dimensions >= classes`` or the dataset is empty.
    """
    # LDA yields at most (classes - 1) meaningful discriminant directions.
    assert classes > dimensions
    # Check if features have been already loaded.
    # Fix: the original tested the bound method `features.any` (always truthy)
    # instead of calling it.
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    """LDA must raise AssertionError when dimensions >= classes."""
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    """PCA smoke test: the hand-written expected output must not match, so the
    inner allclose-check raises AssertionError, which pytest captures."""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 75 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A = logging.get_logger(__name__)
class a__(SequenceFeatureExtractor):
    """
    CLAP-style audio feature extractor: converts raw mono audio into stacked
    dB log-mel spectrogram "input_features" plus a per-sample "is_longer" flag.

    NOTE(review): the original base-class name was destroyed by obfuscation
    (`__magic_name__`, undefined); `SequenceFeatureExtractor`, imported above,
    is the class providing `feature_size`/`sampling_rate`/`padding_value`
    handling used by `super().__init__` here.
    """

    # Names of the model inputs produced by __call__, consumed by the base class.
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # One-sided spectrum size: fft_window_size // 2 + 1.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # Two mel filter banks: the HTK-scale bank is used for "fusion"
        # truncation, the Slaney-scale bank for everything else.
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this instance, dropping the large, recomputable mel filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        """Return a (frames, n_mels) dB-scaled log-mel spectrogram of ``waveform``."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Stack a bilinearly shrunk global view with three randomly placed
        chunks (front/middle/back) into a (4, chunk_frames, 64) array."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding):
        """Pad/truncate one waveform and return (mel_features, is_longer)."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms into a BatchFeature
        with "input_features" and "is_longer" entries."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        # NOTE(review): the obfuscated source had `np.floataa`, which hides
        # whether float32 or float64 was meant; float64 assumed — confirm upstream.
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
| 77 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
snake_case__ : int = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    """
    Configuration for bitsandbytes 8-bit / 4-bit quantization.

    The class name is grounded in the original source: `to_diff_dict` below
    instantiates `BitsAndBytesConfig()` to obtain the default values.
    """

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        # Accept a torch.dtype, a dtype name string, or None (-> float32).
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        """Validate field types; check the installed bitsandbytes version for 4-bit."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        """Return True when either 8-bit or 4-bit loading is requested."""
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        """Return "llm_int8", "fp4", "nf4", or None depending on the settings."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        """Build a config from ``config_dict``; apply and consume matching kwargs."""
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        """Write this config as pretty-printed JSON to ``json_file_path``."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict; the compute dtype becomes its short name
        (e.g. torch.float32 -> "float32")."""
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        """Return the JSON representation; by default only non-default fields."""
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
| 278 | 0 |
import copy
import random
from transformers import CLIPTokenizer
class snake_case_(CLIPTokenizer):
    """
    CLIPTokenizer that expands a single placeholder token into several learned
    sub-tokens (multi-vector textual inversion).

    NOTE(review): the original base class was destroyed by obfuscation (the
    class inherited from itself); `CLIPTokenizer`, imported above, is the only
    base consistent with the `super().add_tokens` / `super().encode` calls.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Maps a placeholder token string -> list of sub-tokens it expands to.
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        """Add ``placeholder_token`` to the vocabulary, failing loudly on duplicates."""
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        """Register ``placeholder_token``, expanding it to ``num_vec_per_token``
        vocabulary entries (suffix `_0`, `_1`, ... when more than one)."""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        """Replace every registered placeholder in ``text`` (str or list of str)
        with its space-joined sub-tokens, optionally shuffled/truncated."""
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
| 701 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
# Key-renaming table used by `rename_keys` below (referenced there as
# MOE_LAYER_NAME_MAPPING).
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}
def rename_keys(s_dict, moe_layer_name_mapping=None):
    """
    Rename flattened T5X checkpoint keys in place so they match the HF
    SwitchTransformers naming, returning the same dict.

    :param s_dict: flat {key: weight} dict (keys use "/" separators)
    :param moe_layer_name_mapping: optional override for the module-level
        MOE_LAYER_NAME_MAPPING table (useful for testing)
    """
    mapping = MOE_LAYER_NAME_MAPPING if moe_layer_name_mapping is None else moe_layer_name_mapping

    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_pattern = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_pattern, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        encoder_decoder_pattern = r"(encoder|decoder)\/"
        if re.match(encoder_decoder_pattern, key):
            groups = re.match(encoder_decoder_pattern, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in mapping.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    # The relative attention bias matrices are stored transposed in T5X.
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert weights
    # into one entry per expert.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
# Maps t5x gin parameter names to SwitchTransformersConfig field names
# (referenced by `convert_gin_to_config` as GIN_TO_CONFIG_MAPPING).
GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    """
    Parse a t5x gin config file into a SwitchTransformersConfig.

    :param gin_file: path to the gin config file
    :param num_experts: number of experts (not present in the gin file)
    """
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            # Numeric values: float when a dot is present, int otherwise.
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    # The activation function is stored as a single-element tuple of strings.
    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Convert a t5x SwitchTransformers checkpoint into a PyTorch checkpoint.

    Fixes vs. the previous revision: all five parameters shared one duplicate
    name (a SyntaxError) while the body referenced the names used here, and
    the t5x loader was misspelled (`load_tax_checkpoint`). The name is
    restored to `convert_flax_checkpoint_to_pytorch`, which is how the
    `__main__` block in this file already calls it.
    """
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: fixes the previous revision, which bound the parser and
    # the parsed args to one mangled name while referencing `parser`/`args`,
    # and read a misspelled `switch_tax_checkpoint_path` attribute.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 262 | 0 |
"""simple docstring"""
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    """A small well-formed two-column CSV file; returns its path as a string.

    Fixes vs. the previous revision: the fixture shared a mangled name with
    its siblings (shadowing them) and its parameter did not request pytest's
    `tmp_path` fixture, and it wrote the path string into the file instead
    of the CSV payload.
    """
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    """A CSV file whose last row has an extra column; returns its path.

    Used to check that parsing fails loudly. Fixes the same naming/parameter
    mangling as the other fixtures (shadowed name, missing `tmp_path`,
    writing the path instead of the data).
    """
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    """A one-column CSV whose single row is the path from the `image_file` fixture.

    Fixes the same mangling as the other fixtures (shadowed name, missing
    `tmp_path`/`image_file` parameters, writing the path instead of the data).
    """
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    """A one-column CSV of string labels ("good"/"bad"); returns its path.

    Fixes the same mangling as the other fixtures (shadowed name, missing
    `tmp_path`, writing the path instead of the data).
    """
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    """A one-column CSV of whitespace-separated integers; returns its path.

    Fixes the same mangling as the other fixtures (shadowed name, missing
    `tmp_path`, writing the path instead of the data).
    """
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    """`Csv._generate_tables` must fail on a malformed file and log which file broke.

    Fixes vs. the previous revision: the three parameters shared one duplicate
    name (a SyntaxError) and `pytest.raises` was missing the exception type.
    """
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    # The error log must name the file that failed to parse.
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    """Casting a path column to the Image feature yields {path, bytes} dicts.

    Fixes vs. the previous revision: the parameter carried a mangled name
    while the body referenced `csv_file_with_image`.
    """
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    # A datasets feature is callable and returns its underlying pyarrow type.
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    """Casting a string column to ClassLabel stores the integer label ids.

    Fixes vs. the previous revision: the parameter carried a mangled name,
    and the final comparison called a nonexistent `.straint(...)` on the
    wrong variable instead of `.str2int(label)` per parsed label.
    """
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    """A converter can split a whitespace-separated column into lists of ints.

    Fixes vs. the previous revision: the converter lambda referenced an
    undefined `x` while its parameter carried a mangled name, and converted
    the whole cell instead of each split token.
    """
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 409 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_UpperCAmelCase = get_tests_dir("""fixtures""")
class a(unittest.TestCase):
    """Offline/robustness tests for image-processor loading (require network).

    Fixes vs. the previous revision: all three test methods shared one mangled
    name (so only the last survived, and none matched unittest's `test_*`
    discovery), and several calls referenced an undefined `lowerCAmelCase`.
    """

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response emulating a server failure on every request.
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_image_processor_from_url(self):
        # Loading directly from a config URL (legacy behaviour) must work.
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    """Round-trip push/load tests against the staging Hub.

    NOTE(review): renamed — the previous revision reused the name of the
    utility test class above, shadowing it. Its `setUpClass`/`tearDownClass`
    also shared one mangled name (so the token was never saved and repos
    never cleaned), and several paths referenced an undefined name; the
    fixtures directory constant defined at module level is used instead.
    """

    @classmethod
    def setUpClass(cls):
        # Authenticate once for the whole class.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of the repos created by the tests below.
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(_UpperCAmelCase)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(_UpperCAmelCase)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(_UpperCAmelCase)
        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 409 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class a__(unittest.TestCase):
    """SageMaker model-parallel training smoke test.

    Fixes vs. the previous revision: all four methods shared one mangled name
    (only the last survived, and `setUp` never ran), and `create_estimator`'s
    parameter did not match the `instance_count` its body uses.
    """

    def setUp(self):
        # Copy the training script next to the test path; `self.env` is
        # provided by the `sm_env` fixture, `self.framework`/`self.script`
        # by @parameterized_class.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator configured for SMP model parallelism."""
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics as a CSV next to the test path."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 395 |
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Return the last `n` digits of 28433 * 2**7830457 + 1 (Project Euler 97).

    Fixes vs. the previous revision: the body referenced `n`, `modulus` and
    `number` while binding its results to throwaway mangled names, and the
    function name is restored to `solution`, which is what the `__main__`
    block below already calls.

    Raises:
        ValueError: if `n` is not a non-negative integer.
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    # Work modulo 10**n; three-argument pow keeps the huge exponentiation cheap.
    modulus = 10**n
    number = 28_433 * pow(2, 7_830_457, modulus) + 1
    return str(number % modulus)
if __name__ == "__main__":
    # Run the module doctests, then print the Project Euler 97 answer.
    # NOTE(review): relies on a module-level `solution(n)` helper being defined.
    from doctest import testmod
    testmod()
    print(F"""{solution(10) = }""")
| 395 | 1 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
# NOTE(review): the previous revision bound all three constants to one mangled
# name, so only the commit hash survived; each now has a distinct name.
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class _lowerCamelCase(unittest.TestCase):
    """Tests for `cached_file` / `get_file_from_repo` / `has_file` (need network).

    Fixes vs. the previous revision: all six methods shared one mangled name
    (only the last survived, and none matched `test_*` discovery), and many
    calls referenced an undefined `lowerCamelCase`. The repo id, cache dir
    and commit hash are held as class attributes here so this class does not
    depend on the (mangled) module-level constants.
    """

    _REPO = "hf-internal-testing/tiny-random-bert"
    _CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
    _FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"

    def test_cached_file(self):
        archive_file = cached_file(self._REPO, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(self._CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(self._CACHE_DIR, subfolder)))
        with open(os.path.join(self._CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(self._CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(self._REPO, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(self._REPO, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(
            archive_file, os.path.join(self._CACHE_DIR, "snapshots", self._FULL_COMMIT_HASH, CONFIG_NAME)
        )

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(self._REPO, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(self._REPO, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(self._REPO, "conf")
        with open(os.path.join(self._CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        # The missing entry is recorded so later calls can skip the network.
        self.assertTrue(os.path.isfile(os.path.join(self._CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(self._REPO, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(self._REPO, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        # A mock response emulating the Hub being unreachable.
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(self._REPO, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # Returns None for a file that is not in the repo.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 89 |
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Validate repository file names: no uppercase, spaces, hyphens, and every
# file must live inside a directory. Exits with the number of offenders.
# Fixes vs. the previous revision: every result list was bound to one mangled
# name (each shadowing the last) while the reporting code read the distinct
# names used below.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 89 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class UpperCamelCase_(PretrainedConfig):
    """Configuration for a data2vec-text model (same layout as RoBERTa's config).

    Fixes vs. the previous revision: every `__init__` parameter shared one
    duplicate name (a SyntaxError), the base class was an undefined mangled
    name instead of `PretrainedConfig`, and the hyper-parameters were bound
    to a throwaway local instead of being stored on `self`.
    """

    # `PretrainedConfig` machinery requires the identifier under `model_type`.
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    """ONNX export configuration for data2vec-text.

    NOTE(review): renamed — the previous revision reused the model config
    class's name, shadowing it; its base class was an undefined mangled name
    instead of `OnnxConfig`, and this property was hidden under a mangled
    name while the ONNX export machinery looks up `inputs`.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 714 |
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro–Winkler similarity of two strings (range 0.0 – 1.0).

    Fixes vs. the previous revision: both the outer function and the helper
    declared duplicate parameter names (a SyntaxError), matched characters
    appended the wrong value, and the prefix loop compared each character
    with itself so the Winkler bonus was always granted. The name is
    restored to `jaro_winkler`, which the `__main__` block below already
    calls.
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        # Characters of _str1 that also occur in _str2 within the Jaro window.
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # Blank out the matched position so it cannot be matched twice.
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    # Run the module doctests, then print one sample similarity.
    # NOTE(review): relies on a module-level `jaro_winkler(str1, str2)` helper
    # being defined above.
    import doctest
    doctest.testmod()
    print(jaro_winkler("""hello""", """world"""))
| 177 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _SCREAMING_SNAKE_CASE(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, dummy-weight) tests for `KandinskyVaaControlnetPipeline`.

    Fixes vs. the previous revision: every property/method shared one mangled
    name (each shadowing the last), the class attributes carried mangled
    names instead of the ones `PipelineTesterMixin` reads, the first base
    class was an undefined mangled name, and `get_dummy_inputs` declared
    duplicate parameter names (a SyntaxError).
    """

    # Attribute names below are the contract read by PipelineTesterMixin.
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00_085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            # mps generators do not support per-device seeding
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaControlnetPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test for the Kandinsky 2.2 controlnet pipeline.

    NOTE(review): renamed — the previous revision reused the fast test
    class's name, shadowing it so those tests never ran. Its two methods
    also shared one mangled name, several values referenced an undefined
    name, and `torch.floataa` is not a real dtype (restored to
    `torch.float16`).
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url):
    """Build the ``SwinaSRConfig`` matching the official checkpoint named in
    *checkpoint_url*.

    Fixes vs. previous revision:
    - restored the function name (the caller below invokes ``get_config``);
    - the per-checkpoint settings were assigned to a throwaway local instead of
      the config object, so every checkpoint silently got the default config.
    """
    config = SwinaSRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        # JPEG-artifact-reduction model: single-channel, no upscaling
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    """Map one key of the original Swin2SR state dict to its Transformers name.

    Args:
        name: a parameter/buffer name from the original checkpoint.
        config: the model config; only ``config.upsampler`` is consulted to pick
            the head-renaming scheme.

    Returns:
        The translated key.

    Fix vs. previous revision: every ``str.replace`` result was assigned to a
    throwaway local, so the function returned its input unchanged.
    """
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''embeddings.patch_embeddings.layernorm''' )
    if "layers" in name:
        name = name.replace('''layers''' , '''encoder.stages''' )
    if "residual_group.blocks" in name:
        name = name.replace('''residual_group.blocks''' , '''layers''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "q_bias" in name:
        name = name.replace('''q_bias''' , '''query.bias''' )
    if "k_bias" in name:
        name = name.replace('''k_bias''' , '''key.bias''' )
    if "v_bias" in name:
        name = name.replace('''v_bias''' , '''value.bias''' )
    if "cpb_mlp" in name:
        name = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''patch_embed.projection''' )
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "conv_first" in name:
        name = name.replace('''conv_first''' , '''first_convolution''' )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('''conv_last''' , '''final_convolution''' )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('''conv_before_upsample.0''' , '''conv_before_upsample''' )
            if "upsample.0" in name:
                name = name.replace('''upsample.0''' , '''upsample.convolution_0''' )
            if "upsample.2" in name:
                name = name.replace('''upsample.2''' , '''upsample.convolution_1''' )
            # head parameters live under the `upsample` module
            name = '''upsample.''' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('''upsample.0.weight''' , '''upsample.conv.weight''' )
            name = name.replace('''upsample.0.bias''' , '''upsample.conv.bias''' )
        else:
            pass
    else:
        # everything else belongs to the Swin2SR backbone
        name = '''swin2sr.''' + name
    return name
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
    """Split the fused qkv tensors of the original state dict and rename keys.

    NOTE(review): this function is severely mangled and does NOT work as
    written -- the intended names were lost:
    - it iterates ``orig_state_dict`` / reads ``config``, ``key_split``, ``val``
      and ``dim``, none of which are defined (the two parameters are both named
      ``__UpperCAmelCase``);
    - every sliced q/k/v tensor is assigned to a throwaway local instead of
      being written back into the state dict, so the dict is emptied by ``pop``
      and returned empty.
    The caller below expects a function named ``convert_state_dict`` taking
    ``(orig_state_dict, config)``.  Reconstruct the write-back keys from the
    upstream conversion script before use.
    """
    for key in orig_state_dict.copy().keys():
        lowercase__: List[Any] = orig_state_dict.pop(__UpperCAmelCase )
        if "qkv" in key:
            # fused attention tensor: split into query / key / value thirds
            lowercase__: Optional[Any] = key.split('''.''' )
            lowercase__: str = int(key_split[1] )
            lowercase__: Tuple = int(key_split[4] )
            lowercase__: Union[str, Any] = config.embed_dim
            if "weight" in key:
                lowercase__: Tuple = val[:dim, :]
                lowercase__: Dict = val[dim : dim * 2, :]
                lowercase__: Dict = val[-dim:, :]
            else:
                lowercase__: Optional[Any] = val[:dim]
                lowercase__: Any = val[dim : dim * 2]
                lowercase__: str = val[-dim:]
            pass
        else:
            lowercase__: int = val
    return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Download an official Swin2SR checkpoint, convert it to the Transformers
    format, verify a reference output slice, and optionally save / push it.

    Args:
        checkpoint_url: URL of the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: if not ``None``, directory to save the
            converted model and image processor to.
        push_to_hub: if truthy, push model and processor to ``caidas/<name>``.

    Fixes vs. previous revision: the body was mangled -- an annotated tuple
    unpacking (``a, a: int = ...``) was a SyntaxError, and the names read below
    (``pixel_values``, ``processor``, ``expected_shape``, ``model_name``, ...)
    were never bound.  Coherent local names have been restored.
    """
    config = get_config(checkpoint_url)
    model = SwinaSRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')
    new_state_dict = convert_state_dict(state_dict, config)
    # strict=False: position-index / mask buffers of the original checkpoint are
    # intentionally dropped (validated just below).
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError('''Missing keys when converting: {}'''.format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(F"""Unexpected key {key} in state_dict""")

    # verify values
    url = '''https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'''
    image = Image.open(requests.get(url, stream=True).raw).convert('''RGB''')
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if '''Jpeg''' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    pixel_values = transforms(image).unsqueeze(0)
    if config.num_channels == 1:
        # grayscale model: keep a single channel
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]])
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]])
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]])
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]])
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]])

    assert (
        outputs.reconstruction.shape == expected_shape
    ), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print('''Looks ok!''')

    url_to_name = {
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''': (
            '''swin2SR-classical-sr-x2-64'''
        ),
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth''': (
            '''swin2SR-classical-sr-x4-64'''
        ),
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth''': (
            '''swin2SR-compressed-sr-x4-48'''
        ),
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth''': (
            '''swin2SR-lightweight-x2-64'''
        ),
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth''': (
            '''swin2SR-realworld-sr-x4-64-bsrgan-psnr'''
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(F"""Saving image processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(F"""caidas/{model_name}""")
        processor.push_to_hub(F"""caidas/{model_name}""")
if __name__ == "__main__":
    # CLI entry point for the Swin2SR checkpoint conversion.
    # Fixes vs. previous revision: the parser/namespace were bound to `__A`
    # while the code read `parser` and `args` (NameError); stray concatenation
    # residue after the final call removed.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
        type=str,
        help="URL of the original Swin2SR checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")

    args = parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
import logging
from transformers import PretrainedConfig
# Module-level logger.  The previous revision bound it to a throwaway name,
# leaving `logger` undefined for code that expects it.
logger = logging.getLogger(__name__)

# Canonical checkpoint name -> hosted config file for this architecture.
BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class BertAbsConfig(PretrainedConfig):
    """Configuration for the BertAbs abstractive-summarization model.

    Fixes vs. previous revision: the base class referenced an undefined name
    (``UpperCamelCase_`` instead of the imported ``PretrainedConfig``), the
    ``__init__`` parameters were all renamed to a placeholder so the body's
    reads (``vocab_size``, ``max_pos``, ...) raised ``NameError``, and every
    attribute was assigned to a throwaway local instead of ``self``.  The
    parameter names are restored from the body's own reads; defaults are
    unchanged.
    """

    model_type = 'bertabs'

    def __init__(
        self,
        vocab_size=30522,        # size of the BERT vocabulary
        max_pos=512,             # maximum sequence length
        enc_layers=6,            # encoder depth
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,            # decoder depth
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.  Restored from a throwaway binding: the property
# implementations of the config class below call `logger.info(...)`.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config file for this architecture.
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class TransfoXLConfig(PretrainedConfig):
    """Configuration for the Transformer-XL model.

    Fixes vs. previous revision: undefined base-class name, placeholder
    ``__init__`` parameters whose real names the body still reads
    (``vocab_size``, ``cutoffs``, ...), attributes assigned to a throwaway
    local instead of ``self``, and the first property losing the
    ``max_position_embeddings`` name that its own setter decorator requires.
    """

    model_type = 'transfo-xl'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size',
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],  # noqa: B006 -- copied into a fresh list below
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init='normal',
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # share all adaptive-softmax projections except the head cluster
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Transformer-XL has no fixed sequence-length limit.
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import table: submodule name -> public symbols.
# Fixes vs. previous revision: the table (and its optional extensions) were
# bound to a throwaway name `_A` while the `_LazyModule(...)` call below read
# `_import_structure`, and the lazy module object itself was never installed
# into `sys.modules`.
_import_structure = {
    """configuration_poolformer""": [
        """POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """PoolFormerConfig""",
        """PoolFormerOnnxConfig""",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""feature_extraction_poolformer"""] = ["""PoolFormerFeatureExtractor"""]
    _import_structure["""image_processing_poolformer"""] = ["""PoolFormerImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_poolformer"""] = [
        """POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """PoolFormerForImageClassification""",
        """PoolFormerModel""",
        """PoolFormerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy loader.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
def greatest_common_divisor(x: int, y: int) -> int:
    """Return gcd(x, y) via Euclid's algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Return the least common multiple of x and y."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Project Euler problem 5: smallest positive number evenly divisible by
    every integer from 1 to *n* (default 20).

    Fixes vs. previous revision: all three functions shared one placeholder
    name with duplicate parameters (a SyntaxError) while their bodies already
    called ``greatest_common_divisor``/``lcm`` and read ``n``/``g``; the real
    names are restored from those reads.
    """
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(F"{solution() = }")
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Return a uniformly random element of *lst*."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element (1-indexed) of *lst* using quickselect
    with a random pivot.  Assumes the elements are distinct (elements equal to
    the pivot other than the pivot itself are dropped by the partition).

    Fixes vs. previous revision: both functions shared one placeholder name
    while the body already called ``random_pivot`` and ``kth_number``; the
    real names are restored from those calls.
    """
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """A distribution transformed by ``y = loc + scale * x``.

    Fixes vs. previous revision: the class lost its name (``distribution`` in the
    output base class below already references ``AffineTransformed``), its base
    referenced an undefined placeholder, the duplicate placeholder parameters
    were a SyntaxError, and ``self.loc`` / ``self.scale`` (read by the
    properties) were assigned to a throwaway local.
    """

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the affinely transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the affinely transformed distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Projects a feature vector onto one linear head per distribution argument
    and maps the raw outputs into the argument domain.

    Fixes vs. previous revision: the class lost its name (the output base class
    below calls ``ParameterProjection(in_features=..., args_dim=...,
    domain_map=...)``), the duplicate placeholder parameters were a
    SyntaxError, and the attributes read by ``forward`` were never set.
    """

    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # one linear head per distribution argument
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable as an ``nn.Module``.

    Fixes vs. previous revision: the class lost its name (referenced by
    ``get_parameter_projection`` in the output base class below) and the
    wrapped callable was assigned to a throwaway local instead of
    ``self.function``.
    """

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    """Base class converting raw network outputs into a torch distribution.

    Subclasses set ``distribution_class`` / ``args_dim`` and implement
    ``domain_map``.  Fixes vs. previous revision: every method shared one
    placeholder name (so only the last survived in the class dict) while
    the bodies already referenced ``self._base_distribution``,
    ``self.event_shape``, ``self.event_dim``, ``self.domain_map`` and
    ``cls.squareplus``; those real names are restored.
    """

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        # `dim` multiplies every entry of args_dim (multivariate output)
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        """Build the (optionally affinely rescaled) output distribution."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of one sample (ignoring batch dimensions)."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value guaranteed to lie in the distribution's support."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping features to distribution args."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *args: torch.Tensor):
        """Map raw projections into each argument's valid domain (override)."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth, positive map: (x + sqrt(x^2 + 4)) / 2."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """Student-T distribution output head.

    Fix vs. previous revision: the duplicate placeholder parameters of
    ``domain_map`` were a SyntaxError; the real names (``df``, ``loc``,
    ``scale``) are restored from the body's own reads, and the base class is
    the distribution-output base defined above.
    """

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        # scale must be strictly positive; df must exceed 2 for finite variance
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """Gaussian distribution output head.

    Fix vs. previous revision: duplicate placeholder parameters of
    ``domain_map`` (SyntaxError); real names restored from the body's reads.
    """

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        # scale must be strictly positive
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative-binomial distribution output head.

    Fixes vs. previous revision: duplicate placeholder parameters
    (SyntaxError), annotated tuple unpacking (``a, a: int = ...``, also a
    SyntaxError), method names collapsed to one placeholder, and trailing
    concatenation residue on the last line.  Real names restored from the
    bodies' own reads (``total_count``, ``logits``, ``distr_args``).
    """

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        # total_count must be strictly positive; logits are unconstrained
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
'''simple docstring'''
import baseaa
def base85_encode(string: str) -> bytes:
    """Ascii85-encode *string* (UTF-8) and return the encoded bytes.

    Fixes vs. previous revision: both functions shared one placeholder name
    (the first was shadowed), the encode body read an undefined ``string``
    instead of its parameter, and the module reference ``baseaa.aaaencode``
    pointed at a non-existent module -- restored to ``base64.a85encode``.
    """
    import base64  # local import: the module-level `import baseaa` above is broken (no such module)

    return base64.a85encode(string.encode('utf-8'))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 bytes back to a UTF-8 string.

    Fix vs. previous revision: the body read an undefined ``_A`` instead of
    its parameter, and ``baseaa.aaadecode`` -> ``base64.a85decode``.
    """
    import base64  # local import: see base85_encode

    return base64.a85decode(a85encoded).decode('utf-8')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class UpperCamelCase_ ( ModelMixin , ConfigMixin ):
    """Stores a learned per-dimension mean/std of image embeddings and scales
    embeddings into and out of that normalized space.

    Fixes vs. previous revision: the base list duplicated one undefined
    placeholder (restored to the two mixins imported above), the methods
    shared one placeholder name and had duplicate parameter names (a
    SyntaxError), and ``self.mean`` / ``self.std`` -- read by every method --
    were assigned to a throwaway local in ``__init__``.
    """

    @register_to_config
    def __init__( self , embedding_dim = 7_6_8 , ):
        super().__init__()
        # learned statistics, shape (1, embedding_dim)
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )

    def to( self , torch_device = None , torch_dtype = None , ):
        """Move/cast the stored statistics; returns ``self`` for chaining."""
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self

    def scale( self , embeds ):
        """Normalize embeddings: (x - mean) / std."""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale( self , embeds ):
        """Invert :meth:`scale`: x * std + mean."""
        embeds = (embeds * self.std) + self.mean
        return embeds
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
# True when the installed torch predates 1.11 (older `export()` kwargs needed).
# Fix vs. previous revision: was bound to a throwaway name while `onnx_export`
# below reads `is_torch_less_than_1_11`.
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export *model* to ONNX at *output_path* with the given I/O signature.

    Fixes vs. previous revision: all eight parameters were collapsed to a
    single duplicated name ``a`` (a SyntaxError); the real names are restored
    from the keyword arguments used at the call sites below, and the function
    name from the calls themselves (``onnx_export``).
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def __UpperCamelCase ( a, a, a, a = False) ->Optional[int]:
    """Convert a Stable Diffusion pipeline (text encoder, UNet, VAE, safety
    checker) to ONNX and validate that the result reloads.

    NOTE(review): this function is severely mangled and cannot run as written:
    the four parameters are all named ``a`` (a SyntaxError), every local is
    rebound to the throwaway ``lowerCamelCase__``, and the names actually read
    below (``fpaa``, ``pipeline``, ``output_path``, ``text_input``,
    ``num_tokens``, ``text_hidden_size``, ``unet_in_channels``,
    ``unet_sample_size``, ``unet_path``, ``unet_model_path``, ``unet_dir``,
    ``unet``, ``vae_encoder``, ``vae_in_channels``, ``vae_sample_size``,
    ``vae_decoder``, ``vae_latent_channels``, ``vae_out_channels``,
    ``safety_checker``, ``clip_num_channels``, ``clip_image_size``,
    ``feature_extractor``, ``onnx_pipeline``) are never bound.  The main guard
    below expects ``convert_models(model_path, output_path, opset,
    fp16=False)``; restore those names before use.
    """
    lowerCamelCase__ = torch.floataa if fpaa else torch.floataa
    if fpaa and torch.cuda.is_available():
        lowerCamelCase__ = "cuda"
    elif fpaa and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        lowerCamelCase__ = "cpu"
    lowerCamelCase__ = StableDiffusionPipeline.from_pretrained(a, torch_dtype=a).to(a)
    lowerCamelCase__ = Path(a)
    # TEXT ENCODER
    lowerCamelCase__ = pipeline.text_encoder.config.max_position_embeddings
    lowerCamelCase__ = pipeline.text_encoder.config.hidden_size
    lowerCamelCase__ = pipeline.tokenizer(
        "A sample prompt", padding="max_length", max_length=pipeline.tokenizer.model_max_length, truncation=a, return_tensors="pt", )
    onnx_export(
        pipeline.text_encoder, model_args=(text_input.input_ids.to(device=a, dtype=torch.intaa)), output_path=output_path / "text_encoder" / "model.onnx", ordered_input_names=["input_ids"], output_names=["last_hidden_state", "pooler_output"], dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        }, opset=a, )
    del pipeline.text_encoder
    # UNET
    lowerCamelCase__ = pipeline.unet.config.in_channels
    lowerCamelCase__ = pipeline.unet.config.sample_size
    lowerCamelCase__ = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet, model_args=(
            torch.randn(2, a, a, a).to(device=a, dtype=a),
            torch.randn(2).to(device=a, dtype=a),
            torch.randn(2, a, a).to(device=a, dtype=a),
            False,
        ), output_path=a, ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"], output_names=["out_sample"], dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        }, opset=a, use_external_data_format=a, )
    lowerCamelCase__ = str(unet_path.absolute().as_posix())
    lowerCamelCase__ = os.path.dirname(a)
    lowerCamelCase__ = onnx.load(a)
    # clean up existing tensor files
    shutil.rmtree(a)
    os.mkdir(a)
    # collate external tensor files into one
    onnx.save_model(
        a, a, save_as_external_data=a, all_tensors_to_one_file=a, location="weights.pb", convert_attribute=a, )
    del pipeline.unet
    # VAE ENCODER
    lowerCamelCase__ = pipeline.vae
    lowerCamelCase__ = vae_encoder.config.in_channels
    lowerCamelCase__ = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    lowerCamelCase__ = lambda a, a: vae_encoder.encode(a, a)[0].sample()
    onnx_export(
        a, model_args=(
            torch.randn(1, a, a, a).to(device=a, dtype=a),
            False,
        ), output_path=output_path / "vae_encoder" / "model.onnx", ordered_input_names=["sample", "return_dict"], output_names=["latent_sample"], dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=a, )
    # VAE DECODER
    lowerCamelCase__ = pipeline.vae
    lowerCamelCase__ = vae_decoder.config.latent_channels
    lowerCamelCase__ = vae_decoder.config.out_channels
    # forward only through the decoder part
    lowerCamelCase__ = vae_encoder.decode
    onnx_export(
        a, model_args=(
            torch.randn(1, a, a, a).to(device=a, dtype=a),
            False,
        ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=a, )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        lowerCamelCase__ = pipeline.safety_checker
        lowerCamelCase__ = safety_checker.config.vision_config.num_channels
        lowerCamelCase__ = safety_checker.config.vision_config.image_size
        lowerCamelCase__ = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker, model_args=(
                torch.randn(
                    1, a, a, a, ).to(device=a, dtype=a),
                torch.randn(1, a, a, a).to(device=a, dtype=a),
            ), output_path=output_path / "safety_checker" / "model.onnx", ordered_input_names=["clip_input", "images"], output_names=["out_images", "has_nsfw_concepts"], dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            }, opset=a, )
        del pipeline.safety_checker
        lowerCamelCase__ = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        lowerCamelCase__ = pipeline.feature_extractor
    else:
        lowerCamelCase__ = None
        lowerCamelCase__ = None
    lowerCamelCase__ = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"), vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"), text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"), scheduler=pipeline.scheduler, safety_checker=a, feature_extractor=a, requires_safety_checker=safety_checker is not None, )
    onnx_pipeline.save_pretrained(a)
    print("ONNX pipeline saved to", a)
    del pipeline
    del onnx_pipeline
    # sanity check: the exported pipeline must reload on CPU
    lowerCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(a, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    # CLI entry point for the Stable Diffusion -> ONNX conversion.
    # Fixes vs. previous revision: the parser/namespace were bound to `A_`
    # while the code read `parser`/`args` (NameError); `args.fpaa` corrected to
    # `args.fp16`, the attribute argparse actually creates for `--fp16`; stray
    # concatenation residue after the final call removed.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )

    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
def __UpperCamelCase ( a, a) ->Dict:
print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
for i in range(a):
for j in range(a):
if dist[i][j] != float("inf"):
print(int(dist[i][j]), end="\t")
else:
print("INF", end="\t")
print()
def floyd_warshall(graph, v):
    """Compute all-pairs shortest paths with the Floyd-Warshall algorithm.

    Args:
        graph: v x v adjacency matrix; graph[i][j] is the weight of edge i -> j,
            float("inf") when there is no edge.
        v: number of vertices.

    Returns:
        Tuple (dist, v) where dist[i][j] is the weight of the shortest path
        from i to j. Also prints the matrix via _print_dist.
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    # Interactive driver: read a weighted directed graph from stdin and run Floyd-Warshall.
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    # Start with no edges: every pair is unreachable (inf), ...
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    # ... except each vertex reaches itself with weight 0.
    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2

# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]

# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2

# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1

# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 360 | 1 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
a : int = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
a : List[str] = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
a : Union[str, Any] = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset, using hendrycks/math equivalence checking."""

    def _info(self):
        """Describe the metric: features, citation and codebase links."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Return {'accuracy': fraction of predictions equivalent to their reference}.

        Each pair is canonicalized and compared with math_equivalence.is_equiv,
        so e.g. "1/2" matches "\\frac{1}{2}".
        """
        n_correct = 0.0
        for prediction, reference in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(prediction, reference) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 218 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    """Builds a small random LayoutLM config plus matching dummy inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Create a random config and the full tuple of model inputs/labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal: coordinates must satisfy x0 <= x1 and y0 <= y1
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Base model: check hidden-state and pooled-output shapes for several input combinations."""
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Masked-LM head: logits over the vocabulary at every position."""
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Sequence-classification head: one logit vector per example."""
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Token-classification head: one logit vector per token."""
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """QA head: start/end logits per token."""
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() into the (config, inputs_dict) shape the mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for the TF LayoutLM model family."""

    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    """Return a fixed 2-example LayoutLM batch (ids, mask, boxes, segments, labels) for integration tests."""
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the pretrained microsoft/layoutlm-base-uncased checkpoint."""

    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 218 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Pretrained vocab/tokenizer file locations for each DPR checkpoint family.
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length (in tokens) supported by each pretrained checkpoint.
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

# Default tokenizer options per pretrained checkpoint.
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPRContextEncoder tokenizer. Identical to `BertTokenizer`; only the
    pretrained-checkpoint metadata (vocab locations, sizes, defaults) differs.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPRQuestionEncoder tokenizer. Identical to `BertTokenizer`; only the
    pretrained-checkpoint metadata (vocab locations, sizes, defaults) differs.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
# One extracted answer span plus its scores and provenance.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

# Raw reader model output: per-token start/end logits and per-passage relevance logits.
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
                Controls the maximum length to use by one of the truncation/padding parameters.

                If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
                is required by one of the truncation/padding parameters. If the model has no specific maximum input
                length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(__lowerCamelCase )
class _UpperCamelCase:
def __call__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : Union[bool, str] = False , SCREAMING_SNAKE_CASE__ : Union[bool, str] = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
elif titles is None or texts is None:
__a : Union[str, Any] = titles if texts is None else texts
return super().__call__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__a : Optional[Any] = titles if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [titles]
__a : Optional[Any] = texts if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [texts]
__a : Tuple = len(SCREAMING_SNAKE_CASE__ )
__a : int = questions if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [questions] * n_passages
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
f'''There should be as many titles than texts but got {len(SCREAMING_SNAKE_CASE__ )} titles and {len(SCREAMING_SNAKE_CASE__ )} texts.''' )
__a : str = super().__call__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ )['input_ids']
__a : Union[str, Any] = super().__call__(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ )['input_ids']
__a : List[str] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
}
if return_attention_mask is not False:
__a : str = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__a : int = attention_mask
return self.pad(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : BatchEncoding , SCREAMING_SNAKE_CASE__ : DPRReaderOutput , SCREAMING_SNAKE_CASE__ : int = 1_6 , SCREAMING_SNAKE_CASE__ : int = 6_4 , SCREAMING_SNAKE_CASE__ : int = 4 , ):
'''simple docstring'''
__a : Dict = reader_input['input_ids']
__a : Union[str, Any] = reader_output[:3]
__a : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = sorted(range(SCREAMING_SNAKE_CASE__ ) , reverse=SCREAMING_SNAKE_CASE__ , key=relevance_logits.__getitem__ )
__a : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
__a : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__a : Optional[int] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__a : Dict = sequence_ids.index(self.pad_token_id )
else:
__a : str = len(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=SCREAMING_SNAKE_CASE__ , top_spans=SCREAMING_SNAKE_CASE__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=SCREAMING_SNAKE_CASE__ , start_index=SCREAMING_SNAKE_CASE__ , end_index=SCREAMING_SNAKE_CASE__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(SCREAMING_SNAKE_CASE__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans):
    """
    Select the `top_spans` highest-scoring, non-overlapping answer spans.

    Every candidate (start, end) with length <= `max_answer_length` is scored as
    start_logits[start] + end_logits[end]; candidates that overlap an already
    chosen span are skipped.

    Returns:
        A list of (start_index, end_index) tuples, best score first.
    """
    # Renamed from a colliding obfuscated name: the caller above invokes
    # `self._get_best_spans`.
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
            scores.append(((start_index, start_index + answer_length), start_score + end_score))
    scores = sorted(scores, key=lambda span_and_score: span_and_score[1], reverse=True)
    chosen_span_intervals = []
    for (start_index, end_index), score in scores:
        if start_index > end_index:
            raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""")
        length = end_index - start_index + 1
        if length > max_answer_length:
            raise ValueError(f"""Span is too long: {length} > {max_answer_length}""")
        # Skip candidates overlapping an already selected span.
        if any(
            start_index <= prev_start_index <= prev_end_index <= end_index
            or prev_start_index <= start_index <= end_index <= prev_end_index
            for (prev_start_index, prev_end_index) in chosen_span_intervals
        ):
            continue
        chosen_span_intervals.append((start_index, end_index))
        if len(chosen_span_intervals) == top_spans:
            break
    return chosen_span_intervals
# Fast DPR reader tokenizer: wires the pretrained vocab/config tables into the
# tokenizer base classes supplied via the (obfuscated) mixin bases.
# NOTE(review): every class attribute below was renamed to the same identifier
# by a code-mangling pass, so each assignment clobbers the previous one —
# original attribute names (vocab_files_names, pretrained_vocab_files_map,
# max_model_input_sizes, pretrained_init_configuration, model_input_names)
# were lost; confirm against the upstream DPR tokenizer before relying on this.
@add_end_docstrings(__lowerCamelCase )
class _UpperCamelCase( __lowerCamelCase , __lowerCamelCase ):
    __SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES
    __SCREAMING_SNAKE_CASE : List[Any] = READER_PRETRAINED_VOCAB_FILES_MAP
    __SCREAMING_SNAKE_CASE : str = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __SCREAMING_SNAKE_CASE : Tuple = READER_PRETRAINED_INIT_CONFIGURATION
    __SCREAMING_SNAKE_CASE : Optional[Any] = ['''input_ids''', '''attention_mask''']
| 706 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
# Container returned by `read_data_sets`; referenced below as `_Datasets`.
# (The mangled version bound both this and the URL to the same name, so the
# namedtuple was clobbered.)
_Datasets = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def _readaa(bytestream):
    """Read one big-endian unsigned 32-bit integer from `bytestream`.

    Named `_readaa` because the call sites below (the MNIST header parsers)
    refer to it under that mangled name; the garbled `numpy.uintaa` dtype is
    restored to `numpy.uint32`.
    """
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_images(f):
    """Extract images from a gzipped MNIST file.

    Args:
        f: an open file object with a `.name` attribute (IDX image file).

    Returns:
        A 4-D uint8 numpy array of shape [num_images, rows, cols, 1].

    Raises:
        ValueError: if the IDX magic number is not 2051.
    """
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name))
        num_images = _readaa(bytestream)
        rows = _readaa(bytestream)
        cols = _readaa(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, 'Please use tf.one_hot on tensors.')
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class-index labels to a one-hot float matrix.

    The mangled version dropped the `labels_one_hot.flat[...] = 1` write
    (leaving an all-zero matrix); it is restored here.
    """
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    # Set one entry per row: position = row_offset + class index.
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract labels from a gzipped MNIST label file.

    Args:
        f: an open file object with a `.name` attribute (IDX label file).
        one_hot: if True, return a one-hot matrix instead of a 1-D vector.
        num_classes: number of classes for the one-hot encoding.

    Raises:
        ValueError: if the IDX magic number is not 2049.
    """
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name))
        num_items = _readaa(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """In-memory MNIST data set with epoch-aware mini-batch iteration.

    Renamed from a mangled identifier: `read_data_sets` below instantiates it
    as `_DataSet`. The mangled `__init__` bound every value to a throwaway
    local, so no instance state was ever set; the `self.*` assignments that
    the properties and `next_batch` read are restored here.
    """

    @deprecated(
        None,
        'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.',
    )
    def __init__(self, images, labels, fake_data=False, one_hot=False,
                 dtype=dtypes.float32, reshape=True, seed=None):
        """Construct a _DataSet.

        `dtype` must be `uint8` (keep raw pixels) or `float32` (rescale into
        [0, 1]). `seed` drives reproducible shuffling.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples, reshuffling at epoch ends."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, 'Please write your own downloading logic.')
def _maybe_download(filename, work_directory, source_url):
    """Download `filename` from `source_url` into `work_directory` unless present.

    Returns:
        The local file path.
    """
    # NOTE(review): this file does `import urllib`; `urllib.request` is only
    # guaranteed available if the submodule has been imported — confirm the
    # top-of-file import is `urllib.request` (or six.moves) upstream.
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print('Successfully downloaded', filename, size, 'bytes.')
    return filepath
@deprecated(
    None, 'Please use alternatives such as:' " tensorflow_datasets.load('mnist')")
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32,
                   reshape=True, validation_size=5000, seed=None,
                   source_url=DEFAULT_SOURCE_URL):
    """Download (if needed) and load the MNIST train/validation/test splits.

    Returns:
        A `_Datasets(train, validation, test)` namedtuple of `_DataSet`s.

    Raises:
        ValueError: if `validation_size` is outside [0, len(train set)].
    """
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = 'train-images-idx3-ubyte.gz'
    train_labels_file = 'train-labels-idx1-ubyte.gz'
    test_images_file = 't10k-images-idx3-ubyte.gz'
    test_labels_file = 't10k-labels-idx1-ubyte.gz'
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, 'rb') as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, 'rb') as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            'Validation size should be between 0 and '
            f'''{len(train_images)}. Received: {validation_size}.'''
        )
        raise ValueError(msg)
    # Carve the validation split off the front of the training data.
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
| 577 | 0 |
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term, power) -> list[str]:
    """Return the P-series 1, 1/2^p, 1/3^p, ..., 1/n^p as display strings.

    Named `p_series` because the `__main__` block below calls it under that
    name; the mangled version had two parameters with the same name (a
    SyntaxError) and referenced undefined locals.

    Args:
        nth_term: number of terms (anything `int()` accepts); "" returns [""].
        power: the exponent p.

    >>> p_series(5, 2)
    ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        # The first term is just "1"; subsequent terms are 1 / (k^p).
        series.append(f"""1 / {pow(temp + 1, int(power))}""" if series else """1""")
    return series
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bind the two inputs to distinct names; the mangled version assigned both
    # to one variable and then called p_series with undefined names.
    nth_term = int(input('''Enter the last number (nth term) of the P-Series'''))
    power = int(input('''Enter the power for P-Series'''))
    print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
    print(p_series(nth_term, power))
"""simple docstring"""
import math
class a_ :
def _snake_case ( self : List[Any] , __UpperCamelCase : list[list[float]] , __UpperCamelCase : list[int] ) ->int:
'''simple docstring'''
_UpperCAmelCase = 0.0
_UpperCAmelCase = 0.0
for i in range(len(__UpperCamelCase ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def _snake_case ( self : Dict , __UpperCamelCase : list[list[int | float]] , __UpperCamelCase : list[int] , __UpperCamelCase : int , __UpperCamelCase : float ) ->list[list[int | float]]:
'''simple docstring'''
for i in range(len(__UpperCamelCase ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def main() -> None:
    """Train the 2-cluster SOM on four binary samples and classify a test sample.

    Named `main` because the `__main__` guard below calls `main()`; the mangled
    version referenced an undefined `_A` throughout.
    """
    # NOTE(review): `SelfOrganizingMap` must be exported next to the class
    # definition above (the class itself is named `a_` in this file) — confirm.
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    test_sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, test_sample)
    # results
    print(f"""Clusters that the test sample belongs to : {winner}""")
    print(f"""Weights that have been trained : {weights}""")


# running the main() function
if __name__ == "__main__":
    main()
'''simple docstring'''
def euclidean_distance_sqr(point1, point2):
    """Squared Euclidean distance between two 2-D points.

    The mangled version subtracted a point from itself (always 0); the two
    distinct operands are restored.
    """
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    """Return `array` sorted by the given coordinate (0 = x, 1 = y)."""
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force: smallest squared distance among the first `points_counts` points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Smallest squared distance within the mid strip, checking only the
    6 neighbours each point can beat (standard closest-pair bound)."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide-and-conquer closest pair; returns the squared distance."""
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)
    # Points within `closest_pair_dis` of the dividing line may form a closer
    # cross pair.
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    """Return the (non-squared) distance of the closest pair among `points`.

    All functions here were mangled to one shared name, so each definition
    clobbered the previous; they are restored to the names their bodies and
    the `__main__` block actually call.
    """
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("""Distance:""", closest_pair_of_points(points, len(points)))
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# All assignments below were mangled to one shared name, so `spec` and
# `DIFFUSERS_PATH` were undefined where used; the real names are restored.
DIFFUSERS_PATH = """src/diffusers"""
REPO_PATH = """."""

# This is to make sure the diffusers module imported is the one in the repo.
# NOTE(review): this executes at import time and requires running from the
# repo root so that src/diffusers/__init__.py exists — confirm.
spec = importlib.util.spec_from_file_location(
    """diffusers""",
    os.path.join(DIFFUSERS_PATH, """__init__.py"""),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    """Whether `line` is still inside the body of a definition at `indent`.

    True for lines at (or deeper than) `indent`, blank lines, and a closing
    `)` of a multi-line signature (optionally with `-> type:`).
    """
    return line.startswith(indent) or len(line) <= 1 or re.search(r"""^\s*\)(\s*->.*:|:)\s*$""", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` in the diffusers repo.

    `object_name` is a dotted path such as `models.attention.Attention`:
    leading parts name the module file under `DIFFUSERS_PATH`, trailing parts
    name nested classes/functions inside it.

    Raises:
        ValueError: if no module file or no matching definition is found.
    """
    parts = object_name.split(""".""")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f'{module}.py')):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.')

    with open(os.path.join(DIFFUSERS_PATH, f'{module}.py'), """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = """"""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf'^{indent}(class|def)\s+{name}(\(|\:)', lines[line_index]) is None
        ):
            line_index += 1
        # Each nesting level adds one indentation step.
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f' {object_name} does not match any function or class in {module}.')

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
# Matches "# Copied from diffusers.<object> [<replace pattern>]" markers.
_re_copy_warning = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
# Matches a single "Old->New [option]" replacement directive.
_re_replace_pattern = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""")
# Matches <FILL ...> placeholders.
_re_fill_pattern = re.compile(r"""<FILL\s+[^>]*>""")
def get_indent(code):
    """Return the leading whitespace of the first non-empty line of `code`."""
    lines = code.split("""\n""")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"""^(\s*)\S""", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Format `code` with black + doc-styler, preserving its indentation.

    Indented snippets are wrapped in a dummy class so black accepts them,
    then unwrapped. NOTE(review): target version restored as PY37 per the
    upstream check_copies script — confirm.
    """
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f'class Bla:\n{code}'
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("""class Bla:\n""") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check that code marked "# Copied from diffusers.X" in `filename` matches X.

    Args:
        filename: path of the file to check.
        overwrite: if True, rewrite the file in place with the fixed copies.

    Returns:
        A list of [object_name, start_line] for every inconsistent copy.
    """
    with open(filename, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f'^{indent}# End copy', line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = """""".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("""\n""") if _re_copy_warning.search(line) is None]
        theoretical_code = """\n""".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("""with""", """""").split(""",""")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f'Detected changes, rewriting {filename}.')
        with open(filename, """w""", encoding="""utf-8""", newline="""\n""") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    """Check every diffusers source file for "Copied from" consistency.

    Args:
        overwrite: if True, fix the inconsistencies in place instead of raising.

    Raises:
        Exception: listing every inconsistency when `overwrite` is False.
    """
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, """**/*.py"""), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = """\n""".join(diffs)
        raise Exception(
            """Found the following copy inconsistencies:\n"""
            + diff
            + """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."""
        )
if __name__ == "__main__":
    # The mangled version bound both the parser and the parsed args to the
    # same name and left a stray residue line; restored here.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase:
    """Albert model tester: builds a tiny random config/inputs and checks the
    output shapes of every Albert head.

    The mangled `__init__` bound every hyper-parameter to a throwaway local,
    so the `self.*` attributes the other methods read were never set; the
    assignments are restored. All methods shared one mangled name (each
    clobbering the previous), so they are restored to the canonical
    transformers tester names — confirm against callers outside this chunk.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Build random ids/masks/labels plus a config for the checks below."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Build a small AlbertConfig from this tester's hyper-parameters."""
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Tile the flat inputs across the choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
    """Common model/pipeline test suite for the ALBERT family.

    NOTE(review): local names in this class were mangled to placeholders, so
    several references below (e.g. `model_class`, `inputs_dict`) no longer
    resolve; the intent is documented but the names must be restored.
    """

    # All ALBERT head classes exercised by the common tests (torch only).
    lowerCAmelCase__ =(
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task name -> model class mapping used by the pipeline tests.
    lowerCAmelCase__ =(
        {
            '''feature-extraction''': AlbertModel,
            '''fill-mask''': AlbertForMaskedLM,
            '''question-answering''': AlbertForQuestionAnswering,
            '''text-classification''': AlbertForSequenceClassification,
            '''token-classification''': AlbertForTokenClassification,
            '''zero-shot''': AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCAmelCase__ =True

    # NOTE(review): the three positional parameters below share one name,
    # which is a SyntaxError — the original signature was
    # (inputs_dict, model_class, return_labels=False); restore it. The zero
    # label tensors are also never written back into the inputs dict.
    def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
        """Augment the common inputs with dummy label tensors when requested."""
        snake_case__ : int =super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
        if return_labels:
            if model_class in get_values(__SCREAMING_SNAKE_CASE ):
                snake_case__ : Dict =torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
                snake_case__ : List[str] =torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
        return inputs_dict

    def UpperCAmelCase ( self ) -> Dict:
        """Instantiate the shared model tester and config tester."""
        snake_case__ : str =AlbertModelTester(self )
        snake_case__ : Optional[Any] =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )

    def UpperCAmelCase ( self ) -> List[Any]:
        """Run the generic configuration checks."""
        self.config_tester.run_common_tests()

    def UpperCAmelCase ( self ) -> Dict:
        """Smoke-test the base AlbertModel forward pass."""
        snake_case__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase ( self ) -> Optional[Any]:
        """Check the pretraining head."""
        snake_case__ : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase ( self ) -> List[Any]:
        """Check the masked-LM head."""
        snake_case__ : int =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase ( self ) -> Optional[Any]:
        """Check the multiple-choice head."""
        snake_case__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase ( self ) -> Optional[Any]:
        """Check the question-answering head."""
        snake_case__ : Tuple =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase ( self ) -> Any:
        """Check the sequence-classification head."""
        snake_case__ : Tuple =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase ( self ) -> Any:
        """Re-run the base-model check for each position-embedding type."""
        snake_case__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            snake_case__ : List[Any] =type
            self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )

    @slow
    def UpperCAmelCase ( self ) -> Optional[Any]:
        """Load the first released checkpoint from the Hub (slow)."""
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case__ : Optional[Any] =AlbertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test: run albert-base-v2 on a fixed input and compare
    a slice of the hidden states against known-good values."""

    @slow
    def UpperCAmelCase ( self ) -> int:
        """Forward a fixed 11-token input and check shape and a 3x3 slice."""
        snake_case__ : Dict =AlbertModel.from_pretrained('''albert-base-v2''' )
        snake_case__ : Dict =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        snake_case__ : List[Any] =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            snake_case__ : Union[str, Any] =model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0]
        # albert-base-v2 hidden size is 768; one batch, 11 tokens.
        snake_case__ : Dict =torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
        snake_case__ : Optional[Any] =torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 381 |
import sys
# The 1000-digit number from Project Euler problem 8, kept as a string so the
# individual digits can be indexed and multiplied.
lowerCamelCase__ = (
    '''73167176531330624919225119674426574742355349194934'''
    '''96983520312774506326239578318016984801869478851843'''
    '''85861560789112949495459501737958331952853208805511'''
    '''12540698747158523863050715693290963295227443043557'''
    '''66896648950445244523161731856403098711121722383113'''
    '''62229893423380308135336276614282806444486645238749'''
    '''30358907296290491560440772390713810515859307960866'''
    '''70172427121883998797908792274921901699720888093776'''
    '''65727333001053367881220235421809751254540594752243'''
    '''52584907711670556013604839586446706324415722155397'''
    '''53697817977846174064955149290862569321978468622482'''
    '''83972241375657056057490261407972968652414535100474'''
    '''82166370484403199890008895243450658541227588666881'''
    '''16427171479924442928230863465674813919123162824586'''
    '''17866458359124566529476545682848912883142607690042'''
    '''24219022671055626321111109370544217506941658960408'''
    '''07198403850962455444362981230987879927244284909188'''
    '''84580156166097919133875499200524063689912560717606'''
    '''05886116467109405077541002256983155200055935729725'''
    '''71636269561882670428252483600823257530420752963450'''
)
def lowercase_ ( SCREAMING_SNAKE_CASE : str ) -> int:
    """Return the product of the decimal digits of the given string.

    An empty string yields 1 (the multiplicative identity).

    Fix: the original initialized a mangled variable name but multiplied into
    the undefined name ``product`` (NameError at runtime).
    """
    product = 1
    for digit in SCREAMING_SNAKE_CASE:
        product *= int(digit )
    return product
def lowercase_ ( SCREAMING_SNAKE_CASE=None , span : int = 13 ) -> int:
    """Return the greatest product of ``span`` adjacent digits in a digit string.

    Args:
        SCREAMING_SNAKE_CASE: digit string to scan; defaults to the module's
            1000-digit Project Euler constant when omitted.
        span: window length (generalized from the hard-coded 13).

    Raises:
        ValueError: if ``span`` is not in ``1..len(digits)``.

    Fixes: the original default referenced the undefined name ``N`` (NameError
    at import time), called the undefined helper ``str_eval``, and its sliding
    heuristic never evaluated the final window. This version scans every
    window exhaustively, which is unambiguous and still fast for 1000 digits.
    """
    digits = lowerCamelCase__ if SCREAMING_SNAKE_CASE is None else SCREAMING_SNAKE_CASE
    if not 1 <= span <= len(digits ):
        raise ValueError("span must be between 1 and the number of digits" )
    largest_product = 0  # digit products are non-negative
    for start in range(len(digits ) - span + 1 ):
        product = 1
        for ch in digits[start : start + span]:
            product *= int(ch )
        largest_product = max(largest_product , product )
    return largest_product
if __name__ == "__main__":
    # Fix: the original printed `solution()`, a name that does not exist in
    # this module — the solver here is `lowercase_`.
    print(F"""{lowercase_() = }""")
| 381 | 1 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
# Module-level logger (diffusers logging wrapper).
snake_case_ = logging.get_logger(__name__)
def _lowerCamelCase( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ) -> List[str]:
A : str = nn.functional.normalize(UpperCamelCase__ )
A : Tuple = nn.functional.normalize(UpperCamelCase__ )
return torch.mm(UpperCamelCase__ , normalized_text_embeds.t() )
class _lowercase ( a ):
    """CLIP-based safety checker: scores image embeddings against learned
    "special care" and NSFW concept embeddings.

    NOTE(review): the two class attributes below share one mangled name, so
    the second assignment clobbers the first (originally ``config_class`` and
    ``_no_split_modules``); likewise both forward methods repeat a single
    parameter name, which is a SyntaxError — restore distinct names.
    """

    _UpperCamelCase = CLIPConfig
    _UpperCamelCase = ["""CLIPEncoderLayer"""]

    def __init__( self , _UpperCAmelCase ):
        """Build the CLIP vision tower, projection, and concept embeddings."""
        super().__init__(_UpperCAmelCase )
        A : Optional[Any] = CLIPVisionModel(config.vision_config )
        A : Tuple = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase )
        # 17 NSFW concepts and 3 special-care concepts, with per-concept thresholds.
        A : Dict = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
        A : Any = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
        A : Union[str, Any] = nn.Parameter(torch.ones(17 ) , requires_grad=_UpperCAmelCase )
        A : str = nn.Parameter(torch.ones(3 ) , requires_grad=_UpperCAmelCase )

    @torch.no_grad()
    def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
        """Per-image numpy scoring path; returns (images, has_nsfw_concepts)."""
        A : Union[str, Any] = self.vision_model(_UpperCAmelCase )[1]  # pooled_output
        A : Optional[int] = self.visual_projection(_UpperCAmelCase )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        A : List[Any] = cosine_distance(_UpperCAmelCase , self.special_care_embeds ).cpu().float().numpy()
        A : Optional[Any] = cosine_distance(_UpperCAmelCase , self.concept_embeds ).cpu().float().numpy()
        A : List[str] = []
        A : List[Any] = image_embeds.shape[0]
        for i in range(_UpperCAmelCase ):
            A : List[str] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            A : List[Any] = 0.0
            for concept_idx in range(len(special_cos_dist[0] ) ):
                A : List[Any] = special_cos_dist[i][concept_idx]
                A : Optional[int] = self.special_care_embeds_weights[concept_idx].item()
                A : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["special_scores"][concept_idx] > 0:
                    # NOTE(review): this appends a *set* literal, not a tuple —
                    # looks unintended; confirm against upstream.
                    result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
                    # any special-care hit lowers all thresholds by 0.01
                    A : str = 0.01
            for concept_idx in range(len(cos_dist[0] ) ):
                A : Dict = cos_dist[i][concept_idx]
                A : Union[str, Any] = self.concept_embeds_weights[concept_idx].item()
                A : Optional[Any] = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(_UpperCAmelCase )
            result.append(_UpperCAmelCase )
        A : str = [len(res['''bad_concepts'''] ) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
        """Vectorized torch scoring path; returns (images, has_nsfw_concepts)."""
        A : List[Any] = self.vision_model(_UpperCAmelCase )[1]  # pooled_output
        A : List[Any] = self.visual_projection(_UpperCAmelCase )
        A : Any = cosine_distance(_UpperCAmelCase , self.special_care_embeds )
        A : Optional[Any] = cosine_distance(_UpperCAmelCase , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        A : Any = 0.0
        A : Optional[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        A : Any = torch.any(special_scores > 0 , dim=1 )
        A : Optional[int] = special_care * 0.01
        A : List[str] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
        A : int = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        A : Dict = torch.any(concept_scores > 0 , dim=1 )
        return images, has_nsfw_concepts
| 537 |
'''simple docstring'''
snake_case_ = [
9_99,
8_00,
7_99,
6_00,
5_99,
5_00,
4_00,
3_99,
3_77,
3_55,
3_33,
3_11,
2_88,
2_66,
2_44,
2_22,
2_00,
1_99,
1_77,
1_55,
1_33,
1_11,
88,
66,
44,
22,
0,
]
snake_case_ = [
9_99,
9_76,
9_52,
9_28,
9_05,
8_82,
8_58,
8_57,
8_10,
7_62,
7_15,
7_14,
5_72,
4_29,
4_28,
2_86,
2_85,
2_38,
1_90,
1_43,
1_42,
1_18,
95,
71,
47,
24,
0,
]
snake_case_ = [
9_99,
9_88,
9_77,
9_66,
9_55,
9_44,
9_33,
9_22,
9_11,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_50,
3_00,
2_99,
2_66,
2_33,
2_00,
1_99,
1_79,
1_59,
1_40,
1_20,
1_00,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
snake_case_ = [
9_99,
9_95,
9_92,
9_89,
9_85,
9_81,
9_78,
9_75,
9_71,
9_67,
9_64,
9_61,
9_57,
9_56,
9_51,
9_47,
9_42,
9_37,
9_33,
9_28,
9_23,
9_19,
9_14,
9_13,
9_08,
9_03,
8_97,
8_92,
8_87,
8_81,
8_76,
8_71,
8_70,
8_64,
8_58,
8_52,
8_46,
8_40,
8_34,
8_28,
8_27,
8_20,
8_13,
8_06,
7_99,
7_92,
7_85,
7_84,
7_77,
7_70,
7_63,
7_56,
7_49,
7_42,
7_41,
7_33,
7_24,
7_16,
7_07,
6_99,
6_98,
6_88,
6_77,
6_66,
6_56,
6_55,
6_45,
6_34,
6_23,
6_13,
6_12,
5_98,
5_84,
5_70,
5_69,
5_55,
5_41,
5_27,
5_26,
5_05,
4_84,
4_83,
4_62,
4_40,
4_39,
3_96,
3_95,
3_52,
3_51,
3_08,
3_07,
2_64,
2_63,
2_20,
2_19,
1_76,
1_32,
88,
44,
0,
]
snake_case_ = [
9_99,
9_97,
9_95,
9_92,
9_90,
9_88,
9_86,
9_84,
9_81,
9_79,
9_77,
9_75,
9_72,
9_70,
9_68,
9_66,
9_64,
9_61,
9_59,
9_57,
9_56,
9_54,
9_51,
9_49,
9_46,
9_44,
9_41,
9_39,
9_36,
9_34,
9_31,
9_29,
9_26,
9_24,
9_21,
9_19,
9_16,
9_14,
9_13,
9_10,
9_07,
9_05,
9_02,
8_99,
8_96,
8_93,
8_91,
8_88,
8_85,
8_82,
8_79,
8_77,
8_74,
8_71,
8_70,
8_67,
8_64,
8_61,
8_58,
8_55,
8_52,
8_49,
8_46,
8_43,
8_40,
8_37,
8_34,
8_31,
8_28,
8_27,
8_24,
8_21,
8_17,
8_14,
8_11,
8_08,
8_04,
8_01,
7_98,
7_95,
7_91,
7_88,
7_85,
7_84,
7_80,
7_77,
7_74,
7_70,
7_66,
7_63,
7_60,
7_56,
7_52,
7_49,
7_46,
7_42,
7_41,
7_37,
7_33,
7_30,
7_26,
7_22,
7_18,
7_14,
7_10,
7_07,
7_03,
6_99,
6_98,
6_94,
6_90,
6_85,
6_81,
6_77,
6_73,
6_69,
6_64,
6_60,
6_56,
6_55,
6_50,
6_46,
6_41,
6_36,
6_32,
6_27,
6_22,
6_18,
6_13,
6_12,
6_07,
6_02,
5_96,
5_91,
5_86,
5_80,
5_75,
5_70,
5_69,
5_63,
5_57,
5_51,
5_45,
5_39,
5_33,
5_27,
5_26,
5_19,
5_12,
5_05,
4_98,
4_91,
4_84,
4_83,
4_74,
4_66,
4_57,
4_49,
4_40,
4_39,
4_28,
4_18,
4_07,
3_96,
3_95,
3_81,
3_66,
3_52,
3_51,
3_30,
3_08,
3_07,
2_86,
2_64,
2_63,
2_42,
2_20,
2_19,
1_76,
1_75,
1_32,
1_31,
88,
44,
0,
]
snake_case_ = [
9_99,
9_91,
9_82,
9_74,
9_66,
9_58,
9_50,
9_41,
9_33,
9_25,
9_16,
9_08,
9_00,
8_99,
8_74,
8_50,
8_25,
8_00,
7_99,
7_00,
6_00,
5_00,
4_00,
3_00,
2_00,
1_00,
0,
]
snake_case_ = [
9_99,
9_92,
9_85,
9_78,
9_71,
9_64,
9_57,
9_49,
9_42,
9_35,
9_28,
9_21,
9_14,
9_07,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_00,
2_99,
2_00,
1_99,
1_00,
99,
0,
]
snake_case_ = [
9_99,
9_96,
9_92,
9_89,
9_85,
9_82,
9_79,
9_75,
9_72,
9_68,
9_65,
9_61,
9_58,
9_55,
9_51,
9_48,
9_44,
9_41,
9_38,
9_34,
9_31,
9_27,
9_24,
9_20,
9_17,
9_14,
9_10,
9_07,
9_03,
9_00,
8_99,
8_91,
8_84,
8_76,
8_69,
8_61,
8_53,
8_46,
8_38,
8_30,
8_23,
8_15,
8_08,
8_00,
7_99,
7_88,
7_77,
7_66,
7_55,
7_44,
7_33,
7_22,
7_11,
7_00,
6_99,
6_88,
6_77,
6_66,
6_55,
6_44,
6_33,
6_22,
6_11,
6_00,
5_99,
5_85,
5_71,
5_57,
5_42,
5_28,
5_14,
5_00,
4_99,
4_85,
4_71,
4_57,
4_42,
4_28,
4_14,
4_00,
3_99,
3_79,
3_59,
3_40,
3_20,
3_00,
2_99,
2_79,
2_59,
2_40,
2_20,
2_00,
1_99,
1_66,
1_33,
1_00,
99,
66,
33,
0,
]
| 537 | 1 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
    """Fast CPU tests for StableDiffusionPipelineSafe built from tiny dummy
    components.

    NOTE(review): local names were mangled to a single placeholder
    (_lowerCAmelCase), so later references (batch_size, image, model, prompt,
    output, ...) no longer resolve; restore distinct names before running.
    """

    def _lowercase ( self ):
        """Collect garbage and free CUDA memory after each test."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def _lowercase ( self ):
        """A seeded random dummy image tensor of shape (1, 3, 32, 32)."""
        _lowerCAmelCase = 1
        _lowerCAmelCase = 3
        _lowerCAmelCase = (32, 32)
        _lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowercase )
        return image

    @property
    def _lowercase ( self ):
        """A tiny seeded conditional UNet."""
        torch.manual_seed(0 )
        _lowerCAmelCase = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        return model

    @property
    def _lowercase ( self ):
        """A tiny seeded VAE."""
        torch.manual_seed(0 )
        _lowerCAmelCase = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        return model

    @property
    def _lowercase ( self ):
        """A tiny seeded CLIP text encoder."""
        torch.manual_seed(0 )
        _lowerCAmelCase = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModel(_lowercase )

    @property
    def _lowercase ( self ):
        """A stand-in feature extractor that returns an empty pixel_values."""

        # NOTE(review): *args and **kwargs below share one name — SyntaxError;
        # `return Out()` also references a name the mangling removed.
        def extract(*_lowercase , **_lowercase ):
            class UpperCAmelCase_ :
                """Minimal object mimicking a feature-extractor output."""

                def __init__( self ):
                    # empty pixel_values placeholder tensor
                    _lowerCAmelCase = torch.ones([0] )

                def _lowercase ( self , _lowercase ):
                    """Move pixel_values to the given device; return self."""
                    self.pixel_values.to(_lowercase )
                    return self

            return Out()

        return extract

    def _lowercase ( self ):
        """End-to-end run with a DDIM scheduler; checks an output slice."""
        _lowerCAmelCase = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase = self.dummy_cond_unet
        _lowerCAmelCase = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
        _lowerCAmelCase = self.dummy_vae
        _lowerCAmelCase = self.dummy_text_encoder
        _lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # make sure here that pndm scheduler skips prk
        _lowerCAmelCase = StableDiffusionPipeline(
            unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , )
        _lowerCAmelCase = sd_pipe.to(_lowercase )
        sd_pipe.set_progress_bar_config(disable=_lowercase )
        _lowerCAmelCase = """A painting of a squirrel eating a burger"""
        _lowerCAmelCase = torch.Generator(device=_lowercase ).manual_seed(0 )
        _lowerCAmelCase = sd_pipe([prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
        _lowerCAmelCase = output.images
        # same seed again, returning a tuple instead of a dict
        _lowerCAmelCase = torch.Generator(device=_lowercase ).manual_seed(0 )
        _lowerCAmelCase = sd_pipe(
            [prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=_lowercase , )[0]
        _lowerCAmelCase = image[0, -3:, -3:, -1]
        _lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _lowerCAmelCase = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def _lowercase ( self ):
        """End-to-end run with a PNDM scheduler; checks an output slice."""
        _lowerCAmelCase = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase = self.dummy_cond_unet
        _lowerCAmelCase = PNDMScheduler(skip_prk_steps=_lowercase )
        _lowerCAmelCase = self.dummy_vae
        _lowerCAmelCase = self.dummy_text_encoder
        _lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # make sure here that pndm scheduler skips prk
        _lowerCAmelCase = StableDiffusionPipeline(
            unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , )
        _lowerCAmelCase = sd_pipe.to(_lowercase )
        sd_pipe.set_progress_bar_config(disable=_lowercase )
        _lowerCAmelCase = """A painting of a squirrel eating a burger"""
        _lowerCAmelCase = torch.Generator(device=_lowercase ).manual_seed(0 )
        _lowerCAmelCase = sd_pipe([prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
        _lowerCAmelCase = output.images
        _lowerCAmelCase = torch.Generator(device=_lowercase ).manual_seed(0 )
        _lowerCAmelCase = sd_pipe(
            [prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=_lowercase , )[0]
        _lowerCAmelCase = image[0, -3:, -3:, -1]
        _lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _lowerCAmelCase = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def _lowercase ( self ):
        """Pipeline with safety checker disabled: runs, saves, and reloads."""
        _lowerCAmelCase = StableDiffusionPipeline.from_pretrained(
            """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=_lowercase )
        assert isinstance(_lowercase , _lowercase )
        assert isinstance(pipe.scheduler , _lowercase )
        assert pipe.safety_checker is None
        _lowerCAmelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(_lowercase )
            _lowerCAmelCase = StableDiffusionPipeline.from_pretrained(_lowercase )
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        _lowerCAmelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def _lowercase ( self ):
        """fp16 smoke test on GPU: all components halved, pipeline still runs."""
        _lowerCAmelCase = self.dummy_cond_unet
        _lowerCAmelCase = PNDMScheduler(skip_prk_steps=_lowercase )
        _lowerCAmelCase = self.dummy_vae
        _lowerCAmelCase = self.dummy_text_encoder
        _lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # put models in fp16
        _lowerCAmelCase = unet.half()
        _lowerCAmelCase = vae.half()
        _lowerCAmelCase = bert.half()
        # make sure here that pndm scheduler skips prk
        _lowerCAmelCase = StableDiffusionPipeline(
            unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , )
        _lowerCAmelCase = sd_pipe.to(_lowercase )
        sd_pipe.set_progress_bar_config(disable=_lowercase )
        _lowerCAmelCase = """A painting of a squirrel eating a burger"""
        _lowerCAmelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
    """Nightly GPU integration tests for Safe Latent Diffusion guidance:
    compare outputs without and with strong safety guidance on known prompts.

    NOTE(review): local names were mangled to one placeholder, so later
    references (sd_pipe, prompt, output, image_slice, ...) do not resolve.
    """

    def _lowercase ( self ):
        """Release GPU memory after each test."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowercase ( self ):
        """Harmful prompt: sld_guidance_scale=0 vs a strong SLD configuration."""
        _lowerCAmelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=_lowercase )
        _lowerCAmelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        _lowerCAmelCase = sd_pipe.to(_lowercase )
        sd_pipe.set_progress_bar_config(disable=_lowercase )
        _lowerCAmelCase = (
            """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
            """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
            """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
            """ children from bahnhof zoo, detailed """
        )
        _lowerCAmelCase = 4_003_660_346
        _lowerCAmelCase = 7
        # without safety guidance (sld_guidance_scale = 0)
        _lowerCAmelCase = torch.manual_seed(_lowercase )
        _lowerCAmelCase = sd_pipe(
            [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
        _lowerCAmelCase = output.images
        _lowerCAmelCase = image[0, -3:, -3:, -1]
        _lowerCAmelCase = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # without safety guidance (strong configuration)
        _lowerCAmelCase = torch.manual_seed(_lowercase )
        _lowerCAmelCase = sd_pipe(
            [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _lowerCAmelCase = output.images
        _lowerCAmelCase = image[0, -3:, -3:, -1]
        _lowerCAmelCase = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def _lowercase ( self ):
        """Nudity-adjacent prompt with an LMS scheduler, with and without SLD."""
        _lowerCAmelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=_lowercase )
        _lowerCAmelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        _lowerCAmelCase = sd_pipe.to(_lowercase )
        sd_pipe.set_progress_bar_config(disable=_lowercase )
        _lowerCAmelCase = """padme amidala taking a bath artwork, safe for work, no nudity"""
        _lowerCAmelCase = 2_734_971_755
        _lowerCAmelCase = 7
        _lowerCAmelCase = torch.manual_seed(_lowercase )
        _lowerCAmelCase = sd_pipe(
            [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
        _lowerCAmelCase = output.images
        _lowerCAmelCase = image[0, -3:, -3:, -1]
        _lowerCAmelCase = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        _lowerCAmelCase = torch.manual_seed(_lowercase )
        _lowerCAmelCase = sd_pipe(
            [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _lowerCAmelCase = output.images
        _lowerCAmelCase = image[0, -3:, -3:, -1]
        _lowerCAmelCase = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def _lowercase ( self ):
        """With the stock safety checker: SLD off yields a black (filtered)
        image; strong SLD yields an unfiltered image matching the slice."""
        _lowerCAmelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
        _lowerCAmelCase = sd_pipe.to(_lowercase )
        sd_pipe.set_progress_bar_config(disable=_lowercase )
        _lowerCAmelCase = (
            """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
            """ leyendecker"""
        )
        _lowerCAmelCase = 1_044_355_234
        _lowerCAmelCase = 12
        _lowerCAmelCase = torch.manual_seed(_lowercase )
        _lowerCAmelCase = sd_pipe(
            [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
        _lowerCAmelCase = output.images
        _lowerCAmelCase = image[0, -3:, -3:, -1]
        _lowerCAmelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
        _lowerCAmelCase = torch.manual_seed(_lowercase )
        _lowerCAmelCase = sd_pipe(
            [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _lowerCAmelCase = output.images
        _lowerCAmelCase = image[0, -3:, -3:, -1]
        _lowerCAmelCase = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 5 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
    """CLIPTokenizer extension that maps one placeholder token to several
    concrete vector tokens (multi-vector textual inversion).

    NOTE(review): several signatures below repeat a single parameter name
    (e.g. ``*lowerCamelCase , **lowerCamelCase``), which is a SyntaxError —
    restore the original distinct names. Also, the ValueError message in the
    multi-vector clash branch is missing a separator between
    ``{placeholder_token}`` and ``keep placeholder tokens independent``.
    """

    def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Union[str, Any]:
        """Initialize the underlying CLIPTokenizer and an empty token map."""
        super().__init__(*lowerCamelCase , **lowerCamelCase )
        # placeholder token -> list of concrete tokens it expands to
        __a = {}

    def __UpperCamelCase ( self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) ->Dict:
        """Add one token to the vocabulary, raising if it already exists."""
        __a = super().add_tokens(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
        if num_added_tokens == 0:
            raise ValueError(
                F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                ' `placeholder_token` that is not already in the tokenizer.' )

    def __UpperCamelCase ( self , lowerCamelCase , *lowerCamelCase , lowerCamelCase=1 , **lowerCamelCase ) ->List[str]:
        """Register a placeholder token backed by num_vec_per_token sub-tokens."""
        __a = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
            output.append(lowerCamelCase )
        else:
            __a = []
            for i in range(lowerCamelCase ):
                # each sub-token is suffixed with its index, e.g. "<tok>_0"
                __a = placeholder_token + F"""_{i}"""
                self.try_adding_tokens(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
                output.append(lowerCamelCase )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F"""The tokenizer already has placeholder token {token} that can get confused with"""
                    F""" {placeholder_token}keep placeholder tokens independent""" )
        __a = output

    def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=1.0 ) ->int:
        """Expand every registered placeholder in the text into its sub-tokens,
        optionally shuffling them and loading only a proportion of them."""
        if isinstance(lowerCamelCase , lowerCamelCase ):
            __a = []
            for i in range(len(lowerCamelCase ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowerCamelCase ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                __a = self.token_map[placeholder_token]
                __a = tokens[: 1 + int(len(lowerCamelCase ) * prop_tokens_to_load )]
                if vector_shuffle:
                    __a = copy.copy(lowerCamelCase )
                    random.shuffle(lowerCamelCase )
                __a = text.replace(lowerCamelCase , ' '.join(lowerCamelCase ) )
        return text

    def __call__( self , lowerCamelCase , *lowerCamelCase , lowerCamelCase=False , lowerCamelCase=1.0 , **lowerCamelCase ) ->Optional[int]:
        """Tokenize text after expanding placeholder tokens."""
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                lowerCamelCase , vector_shuffle=lowerCamelCase , prop_tokens_to_load=lowerCamelCase ) , *lowerCamelCase , **lowerCamelCase , )

    def __UpperCamelCase ( self , lowerCamelCase , *lowerCamelCase , lowerCamelCase=False , lowerCamelCase=1.0 , **lowerCamelCase ) ->List[Any]:
        """Encode text after expanding placeholder tokens."""
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                lowerCamelCase , vector_shuffle=lowerCamelCase , prop_tokens_to_load=lowerCamelCase ) , *lowerCamelCase , **lowerCamelCase , )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
a__ : Optional[Any] = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL.
a__ : List[Any] = {
    '''microsoft/swinv2-tiny-patch4-window8-256''': (
        '''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
    ),
}
class a_ ( a__ ):
    """Configuration for Swin Transformer V2 models.

    Fixes: the original ``__init__`` declared every parameter with one mangled
    name (a SyntaxError), and the two class attributes shared a name so the
    second clobbered the first. Parameter names and the ``model_type`` /
    ``attribute_map`` attributes are reconstructed from the body assignments
    and the upstream Swinv2Config defaults.
    """

    model_type = 'swinv2'
    # Aliases used by the base PretrainedConfig machinery.
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ) -> None:
        """Store all architecture hyper-parameters; extra kwargs go to the base."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 333 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """Unconditional image generation using the stochastic sampler of
    Karras et al. (2022), Algorithm 2 — https://arxiv.org/abs/2206.00364.

    Attributes:
        unet: A ``UNet2DModel`` predicting the noise residual.
        scheduler: A :class:`KarrasVeScheduler` providing the sigma schedule.
    """

    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Generate ``batch_size`` images.

        Returns an :class:`ImagePipelineOutput` (or a plain tuple when
        ``return_dict`` is False).
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 333 | 1 |
class Things:
    """A named item with a value and a weight, used by the greedy solver."""

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        """Return the item's value."""
        return self.value

    def get_name(self):
        """Return the item's name."""
        return self.name

    def get_weight(self):
        """Return the item's weight."""
        return self.weight

    def value_weight(self):
        """Return the value-to-weight ratio (a natural greedy sort key)."""
        return self.value / self.weight
def build_menu(name, value, weight):
    """Build a list of :class:`Things` from parallel sequences.

    Args:
        name: Sequence of item names.
        value: Sequence of item values (same length as ``name``).
        weight: Sequence of item weights (same length as ``name``).

    Returns:
        A list of ``Things`` objects, one per index.
    """
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def greedy(items, max_cost, key_func):
    """Greedy approximation for the 0/1 knapsack problem.

    Sorts ``items`` in decreasing order of ``key_func`` and takes each item
    whose weight still fits inside ``max_cost``.

    Args:
        items: Objects exposing ``get_weight()`` and ``get_value()``.
        max_cost: Maximum total weight allowed.
        key_func: Sort key (e.g. value, weight, or value/weight ratio).

    Returns:
        A tuple ``(chosen_items, total_value)``.
    """
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy() -> None:
    """Placeholder for doctest-style tests (none implemented yet)."""
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 100 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model-level tests for the TensorFlow MobileBERT family."""

    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy `next_sentence_label` labels for pretraining model classes."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict

    class TFMobileBertModelTester(object):
        """Builds small random configs/inputs and checks each model head."""

        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            """Return a small MobileBertConfig plus random ids/masks/labels."""
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            # The model must accept a dict, a list, and plain input ids.
            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            # Repeat each input once per choice: (batch, num_choices, seq).
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            """Return (config, inputs_dict) in the shape the common tests expect."""
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published MobileBERT checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        # Reference logits recorded from the released checkpoint.
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 87 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config URL.
MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    """Configuration for a Marian encoder-decoder translation model.

    Stores the architecture hyper-parameters; inherits serialization and
    special-token handling from :class:`PretrainedConfig`.
    """

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # The decoder may use its own vocabulary; default to the encoder's.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for Marian (default, seq2seq-lm and
    causal-lm tasks), including dummy-input generation for tracing."""

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                # With a cache, the decoder consumes a single new token.
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dummy encoder + decoder inputs, plus zero past_key_values when cached."""
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dummy decoder-only inputs, plus zero past_key_values when cached."""
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation on the configured task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 700 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase :
    # Builder of tiny Albert configs and random inputs for the unit tests below,
    # with one create_and_check_* helper per task head.
    # NOTE(review): obfuscation damage throughout — every helper is named
    # `lowercase__` (later defs shadow earlier ones, even though callers invoke
    # `prepare_config_and_inputs`, `create_and_check_model`, ...) and every local
    # is bound to `SCREAMING_SNAKE_CASE__`, leaving names such as `input_ids`,
    # `result` and `config` unbound where they are read.  Upstream this class is
    # `AlbertModelTester`; restore the identifiers before running.
    def __init__( self : List[str] , _lowercase : List[Any] , _lowercase : int=13 , _lowercase : Any=7 , _lowercase : Tuple=True , _lowercase : Union[str, Any]=True , _lowercase : List[str]=True , _lowercase : Optional[Any]=True , _lowercase : List[Any]=99 , _lowercase : List[str]=16 , _lowercase : List[Any]=36 , _lowercase : Any=6 , _lowercase : int=6 , _lowercase : str=6 , _lowercase : List[str]=37 , _lowercase : List[Any]="gelu" , _lowercase : Union[str, Any]=0.1 , _lowercase : List[str]=0.1 , _lowercase : List[Any]=5_12 , _lowercase : List[str]=16 , _lowercase : Optional[int]=2 , _lowercase : List[str]=0.02 , _lowercase : Union[str, Any]=3 , _lowercase : Optional[Any]=4 , _lowercase : Any=None , ):
        # Record the tiny-model hyper-parameters consumed by get_config().
        SCREAMING_SNAKE_CASE__ : Dict = parent
        SCREAMING_SNAKE_CASE__ : Any = batch_size
        SCREAMING_SNAKE_CASE__ : List[Any] = seq_length
        SCREAMING_SNAKE_CASE__ : str = is_training
        SCREAMING_SNAKE_CASE__ : Optional[Any] = use_input_mask
        SCREAMING_SNAKE_CASE__ : List[str] = use_token_type_ids
        SCREAMING_SNAKE_CASE__ : List[str] = use_labels
        SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size
        SCREAMING_SNAKE_CASE__ : int = embedding_size
        SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
        SCREAMING_SNAKE_CASE__ : Optional[int] = num_hidden_groups
        SCREAMING_SNAKE_CASE__ : str = num_attention_heads
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
        SCREAMING_SNAKE_CASE__ : int = hidden_act
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_dropout_prob
        SCREAMING_SNAKE_CASE__ : Tuple = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE__ : List[str] = max_position_embeddings
        SCREAMING_SNAKE_CASE__ : Optional[Any] = type_vocab_size
        SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size
        SCREAMING_SNAKE_CASE__ : Any = initializer_range
        SCREAMING_SNAKE_CASE__ : str = num_labels
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_choices
        SCREAMING_SNAKE_CASE__ : Optional[int] = scope
    # Build a config plus random input tensors (ids, mask, token types, labels).
    def lowercase__ ( self : Union[str, Any] ):
        SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE__ : List[Any] = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE__ : Optional[int] = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE__ : str = None
        SCREAMING_SNAKE_CASE__ : Dict = None
        SCREAMING_SNAKE_CASE__ : Dict = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size] , self.num_choices )
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Tiny AlbertConfig assembled from the hyper-parameters stored in __init__.
    def lowercase__ ( self : Any ):
        return AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    # Output-shape checks for the bare AlbertModel (with/without mask and ids).
    def lowercase__ ( self : Optional[int] , _lowercase : Tuple , _lowercase : List[str] , _lowercase : List[str] , _lowercase : List[Any] , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : Any ):
        SCREAMING_SNAKE_CASE__ : Dict = AlbertModel(config=_lowercase )
        model.to(_lowercase )
        model.eval()
        SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
        SCREAMING_SNAKE_CASE__ : str = model(_lowercase , token_type_ids=_lowercase )
        SCREAMING_SNAKE_CASE__ : int = model(_lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    # Shape checks for AlbertForPreTraining (MLM logits + sentence-order logits).
    def lowercase__ ( self : Optional[Any] , _lowercase : str , _lowercase : Dict , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : Tuple , _lowercase : Optional[Any] , _lowercase : int ):
        SCREAMING_SNAKE_CASE__ : Any = AlbertForPreTraining(config=_lowercase )
        model.to(_lowercase )
        model.eval()
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(
            _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , sentence_order_label=_lowercase , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
    # Shape checks for AlbertForMaskedLM.
    def lowercase__ ( self : str , _lowercase : Tuple , _lowercase : Tuple , _lowercase : Optional[Any] , _lowercase : List[Any] , _lowercase : List[Any] , _lowercase : Optional[int] , _lowercase : Dict ):
        SCREAMING_SNAKE_CASE__ : List[str] = AlbertForMaskedLM(config=_lowercase )
        model.to(_lowercase )
        model.eval()
        SCREAMING_SNAKE_CASE__ : int = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Shape checks for AlbertForQuestionAnswering (start/end span logits).
    def lowercase__ ( self : Optional[int] , _lowercase : List[str] , _lowercase : Optional[int] , _lowercase : Optional[int] , _lowercase : List[str] , _lowercase : Union[str, Any] , _lowercase : Any , _lowercase : str ):
        SCREAMING_SNAKE_CASE__ : Any = AlbertForQuestionAnswering(config=_lowercase )
        model.to(_lowercase )
        model.eval()
        SCREAMING_SNAKE_CASE__ : Any = model(
            _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    # Shape checks for AlbertForSequenceClassification.
    def lowercase__ ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Any , _lowercase : str , _lowercase : Optional[Any] , _lowercase : int , _lowercase : List[str] ):
        SCREAMING_SNAKE_CASE__ : Any = self.num_labels
        SCREAMING_SNAKE_CASE__ : Any = AlbertForSequenceClassification(_lowercase )
        model.to(_lowercase )
        model.eval()
        SCREAMING_SNAKE_CASE__ : int = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Shape checks for AlbertForTokenClassification.
    def lowercase__ ( self : Dict , _lowercase : str , _lowercase : Tuple , _lowercase : int , _lowercase : List[Any] , _lowercase : List[str] , _lowercase : Tuple , _lowercase : List[str] ):
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.num_labels
        SCREAMING_SNAKE_CASE__ : Optional[Any] = AlbertForTokenClassification(config=_lowercase )
        model.to(_lowercase )
        model.eval()
        SCREAMING_SNAKE_CASE__ : str = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # Shape checks for AlbertForMultipleChoice: inputs are tiled to
    # (batch, num_choices, seq_len) before the forward pass.
    def lowercase__ ( self : str , _lowercase : str , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : List[Any] , _lowercase : int , _lowercase : List[str] , _lowercase : int ):
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_choices
        SCREAMING_SNAKE_CASE__ : Dict = AlbertForMultipleChoice(config=_lowercase )
        model.to(_lowercase )
        model.eval()
        SCREAMING_SNAKE_CASE__ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE__ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE__ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE__ : Optional[int] = model(
            _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    # Repackage prepare_config_and_inputs() output as (config, inputs_dict)
    # for the common-test machinery.
    def lowercase__ ( self : Optional[int] ):
        SCREAMING_SNAKE_CASE__ : int = self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) ,
        ) : List[str] = config_and_inputs
        SCREAMING_SNAKE_CASE__ : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    # Common model/pipeline test suite for Albert.
    # NOTE(review): obfuscation damage — the two mixin bases are collapsed to
    # `_UpperCAmelCase` (upstream: ModelTesterMixin, PipelineTesterMixin), the
    # class attributes are all named `lowerCamelCase` (upstream:
    # all_model_classes, pipeline_model_mapping, fp16-related flag), and every
    # test method is named `lowercase__`, so later defs shadow earlier ones.
    lowerCamelCase : Dict = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    lowerCamelCase : Tuple = (
        {
            '''feature-extraction''': AlbertModel,
            '''fill-mask''': AlbertForMaskedLM,
            '''question-answering''': AlbertForQuestionAnswering,
            '''text-classification''': AlbertForSequenceClassification,
            '''token-classification''': AlbertForTokenClassification,
            '''zero-shot''': AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCamelCase : str = True
    # Add dummy label tensors for pretraining-style classes when requested.
    def lowercase__ ( self : Any , _lowercase : Any , _lowercase : Tuple , _lowercase : int=False ):
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
        if return_labels:
            if model_class in get_values(_lowercase ):
                SCREAMING_SNAKE_CASE__ : Optional[int] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowercase )
                SCREAMING_SNAKE_CASE__ : Any = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
        return inputs_dict
    # setUp: instantiate the model tester and the config tester.
    def lowercase__ ( self : str ):
        SCREAMING_SNAKE_CASE__ : Dict = AlbertModelTester(self )
        SCREAMING_SNAKE_CASE__ : str = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
    # Run the shared AlbertConfig sanity checks.
    def lowercase__ ( self : Optional[int] ):
        self.config_tester.run_common_tests()
    def lowercase__ ( self : str ):
        SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowercase )
    def lowercase__ ( self : str ):
        SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*_lowercase )
    def lowercase__ ( self : Tuple ):
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_lowercase )
    def lowercase__ ( self : Tuple ):
        SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*_lowercase )
    def lowercase__ ( self : Tuple ):
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_lowercase )
    def lowercase__ ( self : str ):
        SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_lowercase )
    # Re-run the model checks for every position-embedding flavour.
    def lowercase__ ( self : Tuple ):
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            SCREAMING_SNAKE_CASE__ : Optional[Any] = type
            self.model_tester.create_and_check_model(*_lowercase )
    # Smoke-test loading the first published checkpoint.
    @slow
    def lowercase__ ( self : Union[str, Any] ):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE__ : List[Any] = AlbertModel.from_pretrained(_lowercase )
            self.assertIsNotNone(_lowercase )
@require_torch
class lowercase ( unittest.TestCase ):
    # Integration test: the original (obfuscated) body bound every local to the
    # same placeholder name and then read the undefined name `_lowercase`, so it
    # raised NameError.  Reconstructed with distinct locals; the checkpoint id,
    # reference tensors and tolerance are unchanged.
    @slow
    def lowercase__ ( self : Optional[int] ):
        """albert-base-v2 hidden states must match the recorded reference values."""
        model = AlbertModel.from_pretrained('albert-base-v2' )
        input_ids = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            # Last hidden state of the base model for the probe sentence.
            output = model(input_ids , attention_mask=attention_mask )[0]
        # (batch, sequence, hidden) for albert-base-v2.
        expected_shape = torch.Size((1, 11, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 250 | 0 |
"""Lazy-loading package init for the MobileNetV2 model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Maps submodule name -> public names it exports.
# (The original bound every value to the same obfuscated name
# `__lowerCamelCase`, clobbering this dict, referenced `_import_structure`
# without defining it, and mixed "V2" export names with "Va"/"_va" module
# names; all three defects are fixed here, consistently using the v2 names
# the export strings already declare.)
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

# Vision-only exports: feature extractor / image processor.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

# Torch-only exports: the model classes themselves.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers; mirrors _import_structure.
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 467 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE_(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound in a fluid, c = sqrt(K / rho).

    The original signature declared *both* parameters as ``snake_case``, which
    is a SyntaxError (duplicate argument name); the body's validation order
    shows the first argument is the density and the second the bulk modulus.

    :param density: fluid density rho in kg/m^3; must be > 0
    :param bulk_modulus: adiabatic bulk modulus K in Pa; must be > 0
    :return: speed of sound in m/s
    :raises ValueError: if either argument is non-positive
    """
    if density <= 0:
        raise ValueError('Impossible fluid density' )
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus' )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    # Run the doctest examples embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 650 | 0 |
'''simple docstring'''
import numpy as np
# 5x5 Polybius square used by the Bifid cipher below; note that "j" is omitted
# (the cipher folds "j" into "i" before encoding).
__lowerCamelCase : Tuple = [
    ['a', 'b', 'c', 'd', 'e'],
    ['f', 'g', 'h', 'i', 'k'],
    ['l', 'm', 'n', 'o', 'p'],
    ['q', 'r', 's', 't', 'u'],
    ['v', 'w', 'x', 'y', 'z'],
]
class UpperCAmelCase :
    """Bifid cipher over a 5x5 Polybius square (the letter 'j' folds into 'i').

    The original (obfuscated) version was broken: ``__init__`` bound the square
    to a throwaway local, every method shared one name, the coordinate helper
    read the undefined name ``indexa``, the array-assignment targets were lost,
    and ``decode`` called ``message.replace`` without keeping the result.  This
    rewrite restores the intended behaviour, keeping the helper names the body
    itself calls (``letter_to_numbers`` / ``numbers_to_letter``).
    """

    def __init__(self) -> None:
        # The square is defined inline so the class does not depend on the
        # name-mangled module-level constant `__lowerCamelCase`.
        self.SQUARE = np.array(
            [
                ['a', 'b', 'c', 'd', 'e'],
                ['f', 'g', 'h', 'i', 'k'],
                ['l', 'm', 'n', 'o', 'p'],
                ['q', 'r', 's', 't', 'u'],
                ['v', 'w', 'x', 'y', 'z'],
            ]
        )

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) coordinates of *letter* in the square."""
        row_idx, col_idx = np.where(letter == self.SQUARE)
        return np.concatenate([row_idx + 1, col_idx + 1])

    def numbers_to_letter(self, index_a: int, index_b: int) -> str:
        """Return the letter at 1-based (row, column) = (index_a, index_b)."""
        return self.SQUARE[index_a - 1, index_b - 1]

    def encode(self, message: str) -> str:
        """Encode *message*: lowercased, spaces dropped, 'j' folded into 'i'."""
        message = message.lower().replace(' ', '').replace('j', 'i')
        # Step 1: write each letter's (row, col) pair into a 2 x N table.
        first_step = np.empty((2, len(message)))
        for pos, letter in enumerate(message):
            coords = self.letter_to_numbers(letter)
            first_step[0, pos] = coords[0]
            first_step[1, pos] = coords[1]
        # Step 2: read the table row-major and regroup into consecutive pairs,
        # each of which names one ciphertext letter.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ''
        for pos in range(len(message)):
            index_a = int(second_step[pos * 2])
            index_b = int(second_step[pos * 2 + 1])
            encoded_message += self.numbers_to_letter(index_a, index_b)
        return encoded_message

    def decode(self, message: str) -> str:
        """Invert :meth:`encode` for ciphertext produced by this cipher."""
        message = message.lower().replace(' ', '')
        # Step 1: lay every ciphertext (row, col) pair out flat, in order; this
        # reconstructs the row-major sequence produced by encode().
        first_step = np.empty(2 * len(message))
        for pos, letter in enumerate(message):
            coords = self.letter_to_numbers(letter)
            first_step[pos * 2] = coords[0]
            first_step[pos * 2 + 1] = coords[1]
        # Step 2: fold back into a 2 x N table; column k is the k-th plaintext
        # letter's (row, col) pair.
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ''
        for pos in range(len(message)):
            index_a = int(second_step[0, pos])
            index_b = int(second_step[1, pos])
            decoded_message += self.numbers_to_letter(index_a, index_b)
        return decoded_message
| 271 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# Slow-tokenizer fallback: the sentencepiece-backed MBartTokenizer is only
# importable when sentencepiece is installed; otherwise the slot is None.
if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    __lowerCamelCase : List[Any] = None
# NOTE(review): obfuscation damage — every module constant below is bound to
# the same name `__lowerCamelCase`, so each assignment clobbers the previous
# one and the class body's references (VOCAB_FILES_NAMES, ...) are unbound.
# Upstream these are: MBartTokenizer(=None above), logger, VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# FAIRSEQ_LANGUAGE_CODES.
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCamelCase : Union[str, Any] = {
    'vocab_file': {
        'facebook/mbart-large-en-ro': (
            'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
        ),
        'facebook/mbart-large-cc25': (
            'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
        ),
    },
    'tokenizer_file': {
        'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
        'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
    },
}
# Maximum input sizes (in tokens) per published checkpoint.
__lowerCamelCase : List[str] = {
    'facebook/mbart-large-en-ro': 1024,
    'facebook/mbart-large-cc25': 1024,
}
# fmt: off
# The 25 fairseq language codes MBart was trained with.
__lowerCamelCase : Optional[int] = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class UpperCAmelCase ( lowercase_):
    """Fast (tokenizers-backed) MBart tokenizer with fairseq language-code handling.

    Source sequences are wrapped as ``<tokens> </s> <src_lang_code>`` and target
    sequences as ``<tokens> </s> <tgt_lang_code>`` via the prefix/suffix token
    lists maintained below.

    NOTE(review): obfuscation damage — the base class is collapsed to
    ``lowercase_`` (presumably PreTrainedTokenizerFast — confirm upstream),
    ``__init__`` declares every parameter under the same name
    ``UpperCamelCase__`` (a SyntaxError), the class attributes all share the
    name ``lowerCAmelCase_`` and every method is named ``UpperCamelCase__``, so
    later defs shadow earlier ones.  Restore the upstream identifiers before
    running.
    """

    lowerCAmelCase_ = VOCAB_FILES_NAMES
    lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
    lowerCAmelCase_ = MBartTokenizer
    # prefix_tokens / suffix_tokens, filled in by set_src_lang_special_tokens.
    lowerCAmelCase_ = []
    lowerCAmelCase_ = []
    def __init__( self : Optional[int] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[str]="<s>" , UpperCamelCase__ : Optional[Any]="</s>" , UpperCamelCase__ : Dict="</s>" , UpperCamelCase__ : str="<s>" , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : Dict="<pad>" , UpperCamelCase__ : str="<mask>" , UpperCamelCase__ : Any=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Dict=None , **UpperCamelCase__ : Any , ) -> List[str]:
        # Mask token behave like a normal word, i.e. include the space before it
        _UpperCamelCase =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
        super().__init__(
            vocab_file=UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
        _UpperCamelCase =vocab_file
        # Slow-tokenizer export is only possible when the sentencepiece file exists.
        _UpperCamelCase =False if not self.vocab_file else True
        _UpperCamelCase =FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
        # Map each fairseq language code to its token id.
        _UpperCamelCase ={
            lang_code: self.convert_tokens_to_ids(UpperCamelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        _UpperCamelCase =src_lang if src_lang is not None else '''en_XX'''
        _UpperCamelCase =self.convert_tokens_to_ids(self._src_lang )
        _UpperCamelCase =tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def UpperCamelCase__ ( self : str ) -> str:
        # Current source-language code.
        return self._src_lang
    @src_lang.setter
    def UpperCamelCase__ ( self : List[str] , UpperCamelCase__ : str ) -> None:
        # Changing the source language re-derives the special-token template.
        _UpperCamelCase =new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def UpperCamelCase__ ( self : Dict , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]:
        # build_inputs_with_special_tokens: wrap ids with prefix/suffix tokens.
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def UpperCamelCase__ ( self : Dict , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]:
        # create_token_type_ids_from_sequences: MBart uses all-zero segment ids.
        _UpperCamelCase =[self.sep_token_id]
        _UpperCamelCase =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def UpperCamelCase__ ( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : List[str] ) -> str:
        # _build_translation_inputs: tokenize raw text and attach the
        # forced-BOS id of the target language.
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        _UpperCamelCase =src_lang
        _UpperCamelCase =self(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
        _UpperCamelCase =self.convert_tokens_to_ids(UpperCamelCase__ )
        _UpperCamelCase =tgt_lang_id
        return inputs
    def UpperCamelCase__ ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str = "en_XX" , UpperCamelCase__ : Optional[List[str]] = None , UpperCamelCase__ : str = "ro_RO" , **UpperCamelCase__ : Optional[int] , ) -> BatchEncoding:
        # prepare_seq2seq_batch: record the language pair then defer to the base.
        _UpperCamelCase =src_lang
        _UpperCamelCase =tgt_lang
        return super().prepare_seqaseq_batch(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
    def UpperCamelCase__ ( self : Tuple ) -> List[str]:
        # _switch_to_input_mode
        return self.set_src_lang_special_tokens(self.src_lang )
    def UpperCamelCase__ ( self : Union[str, Any] ) -> List[str]:
        # _switch_to_target_mode
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def UpperCamelCase__ ( self : str , UpperCamelCase__ : str ) -> None:
        # set_src_lang_special_tokens: no prefix, suffix = [eos, src_lang_code].
        _UpperCamelCase =self.convert_tokens_to_ids(UpperCamelCase__ )
        _UpperCamelCase =[]
        _UpperCamelCase =[self.eos_token_id, self.cur_lang_code]
        _UpperCamelCase =self.convert_ids_to_tokens(self.prefix_tokens )
        _UpperCamelCase =self.convert_ids_to_tokens(self.suffix_tokens )
        # Rebuild the backend tokenizer's post-processor with the new template.
        _UpperCamelCase =processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def UpperCamelCase__ ( self : str , UpperCamelCase__ : str ) -> None:
        # set_tgt_lang_special_tokens: no prefix, suffix = [eos, tgt_lang_code].
        _UpperCamelCase =self.convert_tokens_to_ids(UpperCamelCase__ )
        _UpperCamelCase =[]
        _UpperCamelCase =[self.eos_token_id, self.cur_lang_code]
        _UpperCamelCase =self.convert_ids_to_tokens(self.prefix_tokens )
        _UpperCamelCase =self.convert_ids_to_tokens(self.suffix_tokens )
        _UpperCamelCase =processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def UpperCamelCase__ ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ) -> Tuple[str]:
        # save_vocabulary: copy the sentencepiece model next to the fast files.
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(UpperCamelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
            return
        _UpperCamelCase =os.path.join(
            UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
            copyfile(self.vocab_file , UpperCamelCase__ )
        return (out_vocab_file,)
| 271 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make the diffusers test run deterministic (seeds, cudnn, algorithms).
enable_full_determinism()
class lowerCAmelCase ( UpperCamelCase_ , unittest.TestCase ):
    # Fast unit tests for the Kandinsky 2.2 controlnet img2img pipeline built
    # from tiny dummy UNet/VQ models.
    # NOTE(review): obfuscation damage — the mixin base is collapsed to
    # `UpperCamelCase_` (upstream: PipelineTesterMixin), every method is named
    # `_A`, and locals are bound to `lowerCAmelCase__` but consumed as
    # `SCREAMING_SNAKE_CASE__`, which is unbound; restore the upstream
    # identifiers before running.
    A_ : Any = KandinskyVaaControlnetImgaImgPipeline
    A_ : int = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    A_ : Tuple = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    A_ : Any = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    A_ : int = False
    # The small dimension properties below parameterize the dummy models;
    # the `_A` naming hides which upstream property each one is — presumably
    # text_embedder_hidden_size / time_input_dim / block_out_channels /
    # cross_attention_dim — confirm against the upstream test file.
    @property
    def _A ( self : Dict ):
        '''simple docstring'''
        return 32
    @property
    def _A ( self : Optional[int] ):
        '''simple docstring'''
        return 32
    @property
    def _A ( self : List[str] ):
        '''simple docstring'''
        return self.time_input_dim
    @property
    def _A ( self : Union[str, Any] ):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def _A ( self : Tuple ):
        '''simple docstring'''
        return 100
    @property
    def _A ( self : str ):
        '''simple docstring'''
        # Tiny UNet conditioned on image embeddings plus a controlnet hint.
        torch.manual_seed(0 )
        lowerCAmelCase__ : Any = {
            '''in_channels''': 8,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image_hint''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        lowerCAmelCase__ : str = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__ )
        return model
    @property
    def _A ( self : Any ):
        '''simple docstring'''
        # Constructor kwargs for the tiny VQ (movq) decoder.
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    @property
    def _A ( self : int ):
        '''simple docstring'''
        torch.manual_seed(0 )
        lowerCAmelCase__ : Optional[int] = VQModel(**self.dummy_movq_kwargs )
        return model
    def _A ( self : str ):
        '''simple docstring'''
        # Assemble the pipeline components dict (unet, scheduler, movq).
        lowerCAmelCase__ : Union[str, Any] = self.dummy_unet
        lowerCAmelCase__ : Any = self.dummy_movq
        lowerCAmelCase__ : List[str] = {
            '''num_train_timesteps''': 1000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        lowerCAmelCase__ : Union[str, Any] = DDIMScheduler(**SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ : Union[str, Any] = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def _A ( self : Optional[Any] , a__ : int , a__ : List[str]=0 ):
        '''simple docstring'''
        # Build deterministic dummy call kwargs: embeddings, init image, hint.
        lowerCAmelCase__ : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            SCREAMING_SNAKE_CASE__ )
        # create init_image
        lowerCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase__ : List[Any] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert("RGB" ).resize((256, 256) )
        # create hint
        lowerCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
        if str(SCREAMING_SNAKE_CASE__ ).startswith("mps" ):
            lowerCAmelCase__ : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
        else:
            lowerCAmelCase__ : str = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ : int = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''hint''': hint,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs
    def _A ( self : Optional[int] ):
        '''simple docstring'''
        # End-to-end smoke test on CPU: output shape and reference pixel slice.
        lowerCAmelCase__ : Dict = '''cpu'''
        lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
        lowerCAmelCase__ : Dict = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ : Tuple = pipe.to(SCREAMING_SNAKE_CASE__ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ : Optional[int] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) )
        lowerCAmelCase__ : str = output.images
        lowerCAmelCase__ : Optional[Any] = pipe(
            **self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
        lowerCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCAmelCase__ : List[str] = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
# NOTE(review): this slow-test class is heavily garbled by obfuscation --
# both methods share the name ``_A`` (the second definition shadows the
# first, so the teardown never exists and the test is never discovered),
# and ``SCREAMING_SNAKE_CASE__`` is used as an *argument* in many calls
# where it is an undefined name (presumably ``torch_device`` / ``None`` /
# ``False`` in the original -- TODO confirm against upstream before fixing).
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
    def _A ( self : int ):
        """Free GPU memory after each test."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _A ( self : Any ):
        """End-to-end Kandinsky 2.2 controlnet img2img run against a reference image."""
        # Reference output to compare against.
        lowerCAmelCase__ : Tuple = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
        lowerCAmelCase__ : Optional[int] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        # NOTE(review): ``init_image`` is read here but never assigned above.
        lowerCAmelCase__ : List[Any] = init_image.resize((512, 512) )
        lowerCAmelCase__ : List[str] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png" )
        # Convert the hint image to a (1, C, H, W) float tensor in [0, 1].
        lowerCAmelCase__ : Tuple = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE__ ) ).float() / 255.0
        lowerCAmelCase__ : Tuple = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        lowerCAmelCase__ : Any = '''A robot, 4k photo'''
        lowerCAmelCase__ : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ : List[str] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
        lowerCAmelCase__ : Tuple = pipeline.to(SCREAMING_SNAKE_CASE__ )
        pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
        # Fixed seed for reproducibility of the reference comparison.
        lowerCAmelCase__ : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
        lowerCAmelCase__ : Union[str, Any] = pipe_prior(
            SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , strength=0.85 , generator=SCREAMING_SNAKE_CASE__ , negative_prompt="" , ).to_tuple()
        lowerCAmelCase__ : Optional[int] = pipeline(
            image=SCREAMING_SNAKE_CASE__ , image_embeds=SCREAMING_SNAKE_CASE__ , negative_image_embeds=SCREAMING_SNAKE_CASE__ , hint=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
        lowerCAmelCase__ : Optional[int] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 378 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Count the ways to tile a row of ``length`` unit cells with blocks of
    length 2, 3 and 4 mixed with single grey cells (Project Euler 117).

    ``ways_number[n]`` holds the count for a row of length ``n``; the row may
    always be left entirely grey, hence the initial 1 everywhere.

    NOTE(review): the original assigned the table to a throwaway obfuscated
    name while the loop read the undefined ``ways_number``, and the
    ``__main__`` guard called ``solution`` which was not defined -- both
    references are now consistent.
    """
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                # A tile of ``tile_length`` placed after ``tile_start`` grey
                # cells leaves a suffix that can be filled in
                # ways_number[remaining] ways.
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


if __name__ == "__main__":
    print(f'{solution() = }')
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__a : List[str] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute pixel box ``(x0, y0, x1, y1)`` to the 0-1000
    coordinate grid used by LayoutLM-style models.

    ``int`` truncation (not rounding) is intentional and matches the
    reference implementation.

    NOTE(review): the original def repeated one parameter name three times
    (a SyntaxError) and its name did not match the ``normalize_box`` call
    site below; both are fixed.
    """
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config):
    """Run Tesseract OCR on ``image``.

    Returns ``(words, normalized_boxes)`` where each box is in the 0-1000
    coordinate grid produced by :func:`normalize_box`.

    NOTE(review): the original def repeated one parameter name (SyntaxError)
    and did not match the ``apply_tesseract`` call site below; locals were
    also all bound to one placeholder name. Reconstructed from the body's
    own reads.
    """
    # Apply OCR on a PIL version of the input.
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # Filter empty words and their corresponding coordinates.
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # Turn coordinates into (left, top, left + width, top + height) format.
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # Finally, normalize the bounding boxes to the 0-1000 grid.
    normalized_boxes = [normalize_box(box, image_width, image_height) for box in actual_boxes]

    # Raise instead of assert: asserts vanish under ``python -O``.
    if len(words) != len(normalized_boxes):
        raise ValueError("Not as many words as there are bounding boxes")
    return words, normalized_boxes
class _UpperCamelCase(_UpperCAmelCase):
    r"""Image processor for LayoutLM-style document models.

    Optionally resizes, rescales and normalizes images, and optionally runs
    Tesseract OCR to return words and normalized bounding boxes alongside
    the pixel values.

    NOTE(review): every original method signature repeated one obfuscated
    parameter name (a SyntaxError) and the method names were all identical;
    parameter names below are reconstructed from the body's own attribute
    reads (``self.rescale_factor`` etc.) and the BaseImageProcessor
    contract.
    """

    # Name the processing pipeline expects for the pixel tensor.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_value=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        apply_ocr=True,
        ocr_lang=None,
        tesseract_config="",
        **kwargs,
    ) -> None:
        """Store the default preprocessing configuration."""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        """Resize ``image`` to ``size`` (a dict with "height" and "width")."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # ``resize`` here is the module-level image_transforms helper.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        """Rescale pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """Normalize ``image`` with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        apply_ocr=None,
        ocr_lang=None,
        tesseract_config=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one image or a batch; returns a BatchFeature with
        ``pixel_values`` and, when OCR is enabled, ``words`` and ``boxes``.
        Per-call arguments override the instance defaults.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes.
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 718 | from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase(_UpperCAmelCase):
    """Bundles an auto image processor and an auto tokenizer into one
    processor: ``__call__`` tokenizes text and/or extracts pixel values,
    and decode helpers are forwarded to the tokenizer.

    NOTE(review): the original had duplicated parameter names (SyntaxError),
    three class attributes all named ``__a`` (the last shadowed the rest),
    and bound ``__call__``'s results to a throwaway local so the returned
    ``encoding`` never existed; reconstructed per the ProcessorMixin
    contract.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        """Register both sub-processors with the mixin."""
        super().__init__(image_processor, tokenizer)
        # presumably the image processor is the default active processor --
        # TODO confirm the original attribute name against upstream.
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or preprocess ``images``.

        Raises ValueError when both are None. When both are given, the
        vision features are merged into the text encoding under
        ``pixel_values``.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Tensor names the downstream model consumes."""
        return ["input_ids", "attention_mask", "pixel_values"]
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : str = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class A(_a):
    r"""Configuration class for YOLOS object-detection models.

    NOTE(review): the original ``__init__`` repeated one obfuscated
    parameter name for every argument (a SyntaxError); names and order are
    reconstructed from the body's own attribute assignments. The class name
    ``A`` collides with the ONNX config class below -- left unchanged to
    preserve the external interface.
    """

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],  # noqa: B006 -- never mutated, matches reference config
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        """Store the transformer, detection-head and loss hyper-parameters."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher costs.
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients.
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class A(_a):
    """ONNX export configuration for YOLOS.

    NOTE(review): the original defined three properties under one shared
    obfuscated name, so only the last survived; they are renamed to the
    standard OnnxConfig hook names. The class name ``A`` shadows the model
    config class of the same name above -- left unchanged to preserve the
    external interface.
    """

    # Minimum torch version providing the ops this export needs.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification of the exported graph's inputs."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating the exported model."""
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        """Default ONNX opset version for the export."""
        return 12
| 22 |
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """POST ``message_body`` to a Slack incoming-webhook ``slack_url``.

    Raises ValueError with the status code and response text when Slack
    does not answer 200.

    NOTE(review): the original repeated one parameter name (SyntaxError),
    passed the wrong values for ``headers``/``json`` because of it, and was
    named differently from the ``send_slack_message`` call in the
    ``__main__`` guard; all reconstructed from the guard and the body.
    """
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 22 | 1 |
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Query the GitHub API for self-hosted runner status.

    Writes the offline runners among ``target_runners`` to
    ``offline_runners.txt`` (for Slack reporting) and raises ValueError
    listing them when any are offline.

    NOTE(review): the original repeated one parameter name (SyntaxError)
    and was named differently from the ``get_runner_status`` call in the
    guard below; the nested type-converter likewise did not match its
    ``type=list_str`` reference. Names reconstructed from the call sites.
    """
    offline_runners = []
    cmd = (
        f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    # shell=True so the quoted headers survive; ``token`` comes from a
    # trusted CLI argument, not untrusted input.
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    raw = output.stdout.decode("utf-8")
    status = json.loads(raw)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # Save the result so we can report them on Slack.
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        # argparse ``type=`` converter: comma-separated string -> list.
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
| 717 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    """Output format of the text-generation pipeline.

    NOTE(review): the original reused one member name three times, which
    raises ``TypeError: Attempted to reuse key`` at class creation, and its
    class name was immediately shadowed by the pipeline class below while
    the pipeline code reads ``ReturnType.*`` -- renamed to match those call
    sites.
    """

    # Raw generated token ids.
    TENSORS = 0
    # Only the newly generated text (prompt stripped).
    NEW_TEXT = 1
    # Prompt and generated text concatenated.
    FULL_TEXT = 2
# NOTE(review): this text-generation pipeline class is heavily garbled by
# obfuscation and is NOT importable as-is:
#   * several signatures repeat one parameter name (``*lowercase__`` with
#     ``**lowercase__``, and the many ``lowercase__=None`` parameters) --
#     each is a SyntaxError;
#   * every local is bound to ``a_`` while later lines read the original
#     names (``prefix``, ``preprocess_params``, ``generated_sequence`` ...);
#   * all helper methods share the name ``__magic_name__``, so only the
#     last definition would survive and the Pipeline hooks
#     (``_sanitize_parameters``/``preprocess``/``_forward``/``postprocess``)
#     are never actually defined;
#   * ``a__`` (base class/decorator arg) and ``ReturnType`` are undefined.
# Kept byte-identical; reconstruct against the upstream implementation.
@add_end_docstrings(a__ )
class __lowercase ( a__ ):
    # Article prepended for XLNet/Transfo-XL to give the model more state.
    _lowerCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
    def __init__( self : Tuple , *lowercase__ : Tuple , **lowercase__ : Any ):
        """Initialize the pipeline and resolve the default prompt prefix."""
        super().__init__(*lowercase__ , **lowercase__ )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            a_ = None
            if self.model.config.prefix is not None:
                a_ = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                a_ = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                a_ , a_ , a_ = self._sanitize_parameters(prefix=lowercase__ , **self._forward_params )
                a_ = {**self._preprocess_params, **preprocess_params}
                a_ = {**self._forward_params, **forward_params}
    def __magic_name__ ( self : Any , lowercase__ : Tuple=None , lowercase__ : List[str]=None , lowercase__ : Optional[Any]=None , lowercase__ : Union[str, Any]=None , lowercase__ : Union[str, Any]=None , lowercase__ : Any=None , lowercase__ : Optional[Any]=None , lowercase__ : Dict=None , **lowercase__ : Optional[Any] , ):
        """Split user kwargs into preprocess/forward/postprocess parameter dicts."""
        a_ = {}
        if prefix is not None:
            a_ = prefix
        if prefix:
            a_ = self.tokenizer(
                lowercase__ , padding=lowercase__ , add_special_tokens=lowercase__ , return_tensors=self.framework )
            a_ = prefix_inputs['''input_ids'''].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    ''' [None, \'hole\']''' )
            a_ = handle_long_generation
        preprocess_params.update(lowercase__ )
        a_ = generate_kwargs
        a_ = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
            if return_tensors is not None:
                raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
            a_ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
            a_ = ReturnType.TENSORS
        if return_type is not None:
            a_ = return_type
        if clean_up_tokenization_spaces is not None:
            a_ = clean_up_tokenization_spaces
        if stop_sequence is not None:
            a_ = self.tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
            if len(lowercase__ ) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''' )
            a_ = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def __magic_name__ ( self : int , *lowercase__ : int , **lowercase__ : Union[str, Any] ):
        """Tokenization entry point with a Transfo-XL punctuation quirk."""
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'''add_space_before_punct_symbol''': True} )
        return super()._parse_and_tokenize(*lowercase__ , **lowercase__ )
    def __call__( self : Union[str, Any] , lowercase__ : List[Any] , **lowercase__ : str ):
        """Generate text continuations for the given prompt(s)."""
        return super().__call__(lowercase__ , **lowercase__ )
    def __magic_name__ ( self : Any , lowercase__ : List[Any] , lowercase__ : int="" , lowercase__ : Union[str, Any]=None , **lowercase__ : int ):
        """Tokenize the (optionally prefixed) prompt; handle overlong prompts."""
        a_ = self.tokenizer(
            prefix + prompt_text , padding=lowercase__ , add_special_tokens=lowercase__ , return_tensors=self.framework )
        a_ = prompt_text
        if handle_long_generation == "hole":
            a_ = inputs['''input_ids'''].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                a_ = generate_kwargs['''max_new_tokens''']
            else:
                a_ = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
                if new_tokens < 0:
                    raise ValueError('''We cannot infer how many new tokens are expected''' )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                a_ = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
                        ''' models max length''' )
                a_ = inputs['''input_ids'''][:, -keep_length:]
                if "attention_mask" in inputs:
                    a_ = inputs['''attention_mask'''][:, -keep_length:]
        return inputs
    def __magic_name__ ( self : Union[str, Any] , lowercase__ : Dict , **lowercase__ : Tuple ):
        """Run ``model.generate`` and reshape outputs to (batch, num_return, seq)."""
        a_ = model_inputs['''input_ids''']
        a_ = model_inputs.get('''attention_mask''' , lowercase__ )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            a_ = None
            a_ = None
            a_ = 1
        else:
            a_ = input_ids.shape[0]
        a_ = model_inputs.pop('''prompt_text''' )
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        a_ = generate_kwargs.pop('''prefix_length''' , 0 )
        if prefix_length > 0:
            a_ = '''max_new_tokens''' in generate_kwargs or (
                '''generation_config''' in generate_kwargs
                and generate_kwargs['''generation_config'''].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                a_ = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            a_ = '''min_new_tokens''' in generate_kwargs or (
                '''generation_config''' in generate_kwargs
                and generate_kwargs['''generation_config'''].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        a_ = self.model.generate(input_ids=lowercase__ , attention_mask=lowercase__ , **lowercase__ )
        a_ = generated_sequence.shape[0]
        if self.framework == "pt":
            a_ = generated_sequence.reshape(lowercase__ , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            a_ = tf.reshape(lowercase__ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def __magic_name__ ( self : List[str] , lowercase__ : Optional[Any] , lowercase__ : Dict=ReturnType.FULL_TEXT , lowercase__ : Tuple=True ):
        """Decode generated ids into the requested output format."""
        a_ = model_outputs['''generated_sequence'''][0]
        a_ = model_outputs['''input_ids''']
        a_ = model_outputs['''prompt_text''']
        a_ = generated_sequence.numpy().tolist()
        a_ = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                a_ = {'''generated_token_ids''': sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                a_ = self.tokenizer.decode(
                    lowercase__ , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    a_ = 0
                else:
                    a_ = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ , ) )
                if return_type == ReturnType.FULL_TEXT:
                    a_ = prompt_text + text[prompt_length:]
                else:
                    a_ = text[prompt_length:]
                a_ = {'''generated_text''': all_text}
            records.append(lowercase__ )
        return records
| 143 | 0 |
'''simple docstring'''
# Cell injected at the top of every notebook generated from the docs.
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

# First cells of each converted notebook.
# NOTE(review): the original named all three constants identically while
# this line read the undefined ``INSTALL_CONTENT`` -- a NameError at import;
# distinct names restored from the reference (the second constant's own use
# of ``INSTALL_CONTENT`` grounds the first rename).
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Doc-builder placeholders substituted before running code formatting.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 517 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCAmelCase_(_SCREAMING_SNAKE_CASE):
    r"""Configuration for a Decision Transformer model.

    NOTE(review): the original ``__init__`` repeated one obfuscated
    parameter name for every argument (SyntaxError), and the three class
    attributes all shared one name so only the last survived; names and
    order are reconstructed from the body's own attribute assignments and
    the PretrainedConfig contract.
    """

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        """Store environment dimensions and GPT-2-style backbone settings."""
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 5 | 0 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class __a(Pipeline):
    """Depth-estimation pipeline: image in, predicted depth tensor and a
    grayscale PIL depth map out.

    NOTE(review): in the original, all four hook methods shared one name
    (so only the last survived), locals clobbered each other (the loaded
    image was overwritten by its size), and ``self.image_size`` was never
    set although ``postprocess`` reads it; reconstructed per the base
    Pipeline contract and the imports at the top of this file (which ground
    ``Pipeline``, ``PIPELINE_INIT_ARGS`` and
    ``MODEL_FOR_DEPTH_ESTIMATION_MAPPING``).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        # Restrict to models registered for depth estimation.
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        """Predict depth for one image (path/URL/PIL) or a list of them."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # No configurable preprocess/forward/postprocess parameters.
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        # Remember the original size so the prediction can be resized back.
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # PIL size is (width, height); interpolate expects (height, width).
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        # Normalize to 0-255 for a viewable grayscale depth image.
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
| 588 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def UpperCAmelCase_ ( ):
SCREAMING_SNAKE_CASE__ ="""https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"""
SCREAMING_SNAKE_CASE__ =Image.open(requests.get(__UpperCamelCase, stream=__UpperCamelCase ).raw ).convert("""RGB""" )
return image
def UpperCAmelCase_ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ =[]
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new` in `dct`, in place."""
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Merge each vision layer's separate q/v attention biases into one qkv bias.

    The original LAVIS checkpoint stores query and value biases separately (the
    key bias is implicitly zero); the HF implementation expects a single
    concatenated ``attn.qkv.bias`` tensor of shape ``[3 * dim]`` per layer.
    Modifies `state_dict` in place.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""")
        v_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""")
        # next, set bias in the state dict: layout is [q | zeros (k) | v]
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def get_blipa_config(model_name):
    """Build the InstructBLIP config matching `model_name`.

    Returns:
        (config, image_size): the assembled `InstructBlipConfig` and the image
        resolution the checkpoint was trained at (364 for COCO-finetuned
        checkpoints, 224 otherwise).
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32_001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32_001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30_523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original InstructBLIP (LAVIS) checkpoint to the HF layout.

    Loads the original model, renames/merges its state-dict keys into an
    `InstructBlipForConditionalGeneration`, verifies logits and generations
    against the original model, and optionally saves/pushes the result.

    Args:
        model_name: one of the supported ``instructblip-*`` names.
        pytorch_dump_folder_path: if given, save model + processor there.
        push_to_hub: if True, push model + processor to ``Salesforce/{model_name}``.
    """
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = TaTokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blipa_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    original_model_name, original_model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    # use two devices so the original and converted model can live on separate GPUs
    hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=original_model_name, model_type=original_model_type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    # NOTE(review): OPENAI_CLIP_MEAN / OPENAI_CLIP_STD are expected to be imported
    # from transformers.utils.constants at the top of this file — confirm.
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    choices = [
        "instructblip-vicuna-7b",
        "instructblip-vicuna-13b",
        "instructblip-flan-t5-xl",
        "instructblip-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="instructblip-flan-t5-xl",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    args = parser.parse_args()

    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 588 | 1 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE__:Union[str, Any] = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    """pytest hook: register diffusers' shared command-line options."""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit diffusers' extra test reports when --make-reports is set."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 528 | """simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    """Tests for `is_safetensors_compatible`: a repo's file list is compatible
    when every (optionally variant-suffixed) ``.bin`` weight has a matching
    ``.safetensors`` counterpart."""

    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 528 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    r"""
    Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a
    single processor. Audio is routed through the feature extractor, text
    through the tokenizer; when both are given, the tokenized ids are attached
    to the audio features as ``labels``.
    """

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Prepare audio and/or text inputs (see class docstring)."""
        # For backward compatibility with the deprecated `as_target_processor`
        # context manager: route everything to the current processor.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # the first positional argument is treated as the audio input
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # both modalities present: attach the tokenized text as labels
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily route plain calls to the tokenizer (deprecated)."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 497 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for `OnnxStableDiffusionPipeline` against a tiny hub
    checkpoint, exercising each supported scheduler and prompt-embeds paths."""

    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Deterministic pipeline kwargs for a 2-step run."""
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        inputs["prompt_embeds"] = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests for `OnnxStableDiffusionPipeline` against
    the full stable-diffusion checkpoints on the CUDA execution provider."""

    @property
    def gpu_provider(self):
        # onnxruntime provider tuple: (name, provider options)
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        # using the default PNDM scheduler
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
| 497 | 1 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = False, False, False
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a_ = None
a_ = True
a_ = True
a_ = None
# Automatically constructed
a_ = "dict"
a_ = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
a_ = field(default="Audio" , init=UpperCamelCase_ , repr=UpperCamelCase_ )
    def __call__( self : str ):
        # Features are callable: calling the feature returns the pyarrow
        # storage type used to store encoded examples (here a
        # struct<bytes: binary, path: string>, see `pa_type` above).
        return self.pa_type
def _lowercase ( self : Optional[Any] , __A : Tuple ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return {"bytes": None, "path": value}
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
snake_case__ : Any = BytesIO()
sf.write(__UpperCAmelCase , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
snake_case__ : Optional[int] = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
snake_case__ : Any = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 3_2_7_6_7
snake_case__ : List[str] = BytesIO(bytes() )
sf.write(__UpperCAmelCase , __UpperCAmelCase , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def _lowercase ( self : Optional[int] , __A : Optional[int] , __A : List[str] = None ):
        """Decode one encoded audio example into ``{"path", "array", "sampling_rate"}``.

        NOTE(review): this body is machine-mangled — the signature repeats the
        parameter name ``__A`` (a SyntaxError) and locals were collapsed to
        ``snake_case__`` while later lines read ``path``/``file``/``array``/
        ``sampling_rate``. Restore the original bindings from upstream before use.
        """
        # Decoding must be enabled on the feature for this path to be valid.
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
        # Split the example into (path, file-like); bytes take precedence when present.
        snake_case__ : Union[str, Any] = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
        # librosa/soundfile are optional dependencies, imported lazily.
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
        # Infer the audio container format from the file extension when a path is known.
        snake_case__ : Dict = xsplitext(__UpperCAmelCase )[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
        if file is None:
            # Remote path: best-effort lookup of a per-repo auth token, then stream via xopen.
            snake_case__ : Any = token_per_repo_id or {}
            snake_case__ : str = path.split("::" )[-1]
            try:
                snake_case__ : Tuple = string_to_dict(__UpperCAmelCase , config.HUB_DATASETS_URL )["repo_id"]
                snake_case__ : List[str] = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                snake_case__ : List[Any] = None
            with xopen(__UpperCAmelCase , "rb" , use_auth_token=__UpperCAmelCase ) as f:
                snake_case__ : Optional[int] = sf.read(__UpperCAmelCase )
        else:
            snake_case__ : Any = sf.read(__UpperCAmelCase )
        # soundfile yields (frames, channels); transpose so channels come first.
        snake_case__ : Optional[Any] = array.T
        if self.mono:
            snake_case__ : List[Any] = librosa.to_mono(__UpperCAmelCase )
        # Resample on the fly when the feature pins a different sampling rate.
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            snake_case__ : Optional[int] = librosa.resample(__UpperCAmelCase , orig_sr=__UpperCAmelCase , target_sr=self.sampling_rate )
            snake_case__ : Optional[Any] = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _lowercase ( self : Tuple ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
    def _lowercase ( self : List[str] , __A : Tuple ):
        """Cast an Arrow storage array of any supported layout to the Audio struct type.

        Accepts string (paths), binary (bytes), struct-with-"array" (already
        decoded), or struct {bytes, path} storage, and returns the input cast
        to ``self.pa_type``.

        NOTE(review): machine-mangled — the body reads ``storage`` but the
        parameter is ``__A``, and results are bound to throwaway
        ``snake_case__`` locals. Restore from upstream before relying on it.
        """
        if pa.types.is_string(storage.type ):
            # Paths only: pair each path with a null "bytes" column.
            snake_case__ : Dict = pa.array([None] * len(__UpperCAmelCase ) , type=pa.binary() )
            snake_case__ : List[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            # Raw bytes only: pair with a null "path" column.
            snake_case__ : str = pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
            snake_case__ : Any = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
            # Already-decoded audio: re-encode each example back to bytes.
            snake_case__ : Optional[Any] = pa.array([Audio().encode_example(__UpperCAmelCase ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            # Struct storage: reuse existing "bytes"/"path" fields, fill missing ones with nulls.
            if storage.type.get_field_index("bytes" ) >= 0:
                snake_case__ : Dict = storage.field("bytes" )
            else:
                snake_case__ : Union[str, Any] = pa.array([None] * len(__UpperCAmelCase ) , type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                snake_case__ : str = storage.field("path" )
            else:
                snake_case__ : List[Any] = pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
            snake_case__ : Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        return array_cast(__UpperCAmelCase , self.pa_type )
    def _lowercase ( self : Dict , __A : str ):
        """Embed referenced files into storage: read every path into the "bytes"
        column and keep only the basename in "path".

        NOTE(review): machine-mangled — the body reads ``storage`` but the
        parameter is ``__A``. Restore from upstream before relying on it.
        """
        @no_op_if_value_is_null
        def path_to_bytes(__A : Dict ):
            # Read the whole file through xopen (handles local and remote paths alike).
            with xopen(__UpperCAmelCase , "rb" ) as f:
                snake_case__ : Dict = f.read()
            return bytes_
        snake_case__ : Tuple = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        snake_case__ : Union[str, Any] = pa.array(
            [os.path.basename(__UpperCAmelCase ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
        snake_case__ : List[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(__UpperCAmelCase , self.pa_type )
| 297 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
# NOTE(review): the obfuscation collapsed every module-level constant to the
# single name ``lowerCamelCase`` — each assignment below clobbers the previous
# one, so only the final list survives at runtime. Upstream these are distinct
# names (logger, _CONFIG_FOR_DOC, _CHECKPOINT_FOR_DOC, archive list, ...).
lowerCamelCase = logging.get_logger(__name__)
# General docstring
lowerCamelCase = """PoolFormerConfig"""
# Base docstring
lowerCamelCase = """sail/poolformer_s12"""
lowerCamelCase = [1, 5_12, 7, 7]
# Image classification docstring
lowerCamelCase = """sail/poolformer_s12"""
lowerCamelCase = """tabby, tabby cat"""
lowerCamelCase = [
    """sail/poolformer_s12""",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def SCREAMING_SNAKE_CASE( input , drop_prob = 0.0 , training = False ):
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks.

    Args:
        input: tensor of any shape; the drop decision is made per sample (dim 0).
        drop_prob: probability of zeroing the whole residual path for a sample.
        training: dropping only happens in training mode; identity otherwise.

    Returns:
        A tensor with the same shape as ``input``.
    """
    # Fast path: identity at inference time or with zero drop probability.
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcastable over the remaining dims
    # (works with any tensor rank, not just 2D ConvNets).
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape , dtype=input.dtype , device=input.device )
    random_tensor.floor_()  # binarize: 1 with probability keep_prob, else 0
    # Scale kept samples by 1/keep_prob so the expected activation is unchanged.
    output = input.div(keep_prob ) * random_tensor
    return output
class _a ( nn.Module ):
    """Per-sample stochastic depth ("drop path") as an ``nn.Module`` wrapper.

    NOTE(review): machine-mangled — ``__init__`` binds its argument to a
    throwaway local ``a__`` and reads an undefined ``drop_prob`` instead of
    setting ``self.drop_prob``; both methods below share the name ``_A`` so
    the second silently overrides the first (upstream: ``forward`` and
    ``extra_repr``); ``drop_path`` is not defined in this module (the helper
    above was renamed ``SCREAMING_SNAKE_CASE``). Restore from upstream.
    """
    def __init__( self , __UpperCAmelCase = None ):
        """Remember the drop probability for use in the forward pass."""
        super().__init__()
        a__ : Optional[Any] = drop_prob
    def _A ( self , __UpperCAmelCase ):
        """Forward pass: delegate to the module-level drop-path helper."""
        return drop_path(__UpperCAmelCase , self.drop_prob , self.training )
    def _A ( self ):
        """Extra repr string shown by ``print(module)``."""
        return "p={}".format(self.drop_prob )
class _a ( nn.Module ):
    """PoolFormer patch-embedding stage: a strided convolution that embeds
    patches while downsampling, optionally followed by a norm layer.

    NOTE(review): machine-mangled — ``__init__`` repeats the parameter name
    ``__UpperCAmelCase`` (a SyntaxError), binds to throwaway locals ``a__``
    instead of ``self.projection``/``self.norm``, and ``nn.Convad`` does not
    exist in torch (upstream: ``nn.Conv2d``). Restore from upstream.
    """
    def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ):
        """Build the projection conv and optional norm from patch/stride/padding specs."""
        super().__init__()
        # Normalise scalar specs to 2-tuples for the conv constructor.
        a__ : Optional[Any] = patch_size if isinstance(__UpperCAmelCase , collections.abc.Iterable ) else (patch_size, patch_size)
        a__ : List[str] = stride if isinstance(__UpperCAmelCase , collections.abc.Iterable ) else (stride, stride)
        a__ : Union[str, Any] = padding if isinstance(__UpperCAmelCase , collections.abc.Iterable ) else (padding, padding)
        a__ : int = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , kernel_size=__UpperCAmelCase , stride=__UpperCAmelCase , padding=__UpperCAmelCase )
        a__ : Optional[Any] = norm_layer(__UpperCAmelCase ) if norm_layer else nn.Identity()
    def _A ( self , __UpperCAmelCase ):
        """Project pixel values to patch embeddings, then normalise."""
        a__ : Dict = self.projection(__UpperCAmelCase )
        a__ : Union[str, Any] = self.norm(__UpperCAmelCase )
        return embeddings
class _a ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
super().__init__(1 , __UpperCAmelCase , **__UpperCAmelCase )
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase ):
"""simple docstring"""
super().__init__()
a__ : List[str] = nn.AvgPoolad(__UpperCAmelCase , stride=1 , padding=pool_size // 2 , count_include_pad=__UpperCAmelCase )
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
return self.pool(__UpperCAmelCase ) - hidden_states
class _a ( nn.Module ):
    """PoolFormer MLP block: 1x1 conv -> activation -> drop -> 1x1 conv -> drop.

    NOTE(review): machine-mangled — ``__init__`` repeats parameter names
    (SyntaxError), binds to throwaway locals ``a__`` instead of
    ``self.conv1``/``self.conv2``/``self.drop``/``self.act_fn``, references
    undefined ``config``, and ``nn.Convad`` does not exist in torch
    (upstream: ``nn.Conv2d``). ``PoolFormerDropPath`` is also not defined in
    this module (the class was renamed ``_a``). Restore from upstream.
    """
    def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
        """Build the two pointwise convs, the drop-path, and the activation."""
        super().__init__()
        a__ : str = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 )
        a__ : int = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 )
        a__ : Any = PoolFormerDropPath(__UpperCAmelCase )
        # Resolve a string activation name through the ACT2FN registry.
        if isinstance(config.hidden_act , __UpperCAmelCase ):
            a__ : List[Any] = ACTaFN[config.hidden_act]
        else:
            a__ : Union[str, Any] = config.hidden_act
    def _A ( self , __UpperCAmelCase ):
        """Apply conv -> activation -> drop -> conv -> drop."""
        a__ : Union[str, Any] = self.conva(__UpperCAmelCase )
        a__ : Union[str, Any] = self.act_fn(__UpperCAmelCase )
        a__ : Dict = self.drop(__UpperCAmelCase )
        a__ : Union[str, Any] = self.conva(__UpperCAmelCase )
        a__ : int = self.drop(__UpperCAmelCase )
        return hidden_states
class _a ( nn.Module ):
    """One PoolFormer layer: pooling token-mixer + MLP, each wrapped in a
    pre-norm residual branch, with optional learned per-channel layer scale.

    NOTE(review): machine-mangled — ``__init__`` repeats parameter names
    (SyntaxError), binds sub-modules/parameters to throwaway locals ``a__``
    instead of ``self.*``, and the forward body reads names
    (``pooling_output``/``hidden_states``/``layer_output``/``output``/
    ``outputs``) that are never bound. Restore from upstream.
    """
    def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
        """Assemble pooling, MLP, two group norms, drop-path, and layer-scale parameters."""
        super().__init__()
        a__ : Any = PoolFormerPooling(__UpperCAmelCase )
        a__ : Any = PoolFormerOutput(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        a__ : Any = PoolFormerGroupNorm(__UpperCAmelCase )
        a__ : Dict = PoolFormerGroupNorm(__UpperCAmelCase )
        # Useful for training neural nets
        a__ : List[Any] = PoolFormerDropPath(__UpperCAmelCase ) if drop_path > 0.0 else nn.Identity()
        a__ : List[str] = config.use_layer_scale
        if config.use_layer_scale:
            # Learned per-channel scales for both residual branches.
            a__ : str = nn.Parameter(
                config.layer_scale_init_value * torch.ones((__UpperCAmelCase) ) , requires_grad=__UpperCAmelCase )
            a__ : int = nn.Parameter(
                config.layer_scale_init_value * torch.ones((__UpperCAmelCase) ) , requires_grad=__UpperCAmelCase )
    def _A ( self , __UpperCAmelCase ):
        """Run both residual branches; returns a tuple ``(hidden_states,)``."""
        if self.use_layer_scale:
            a__ : Any = self.pooling(self.before_norm(__UpperCAmelCase ) )
            # Broadcast the (C,) scale over the spatial dims.
            a__ : int = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
            # First residual connection
            a__ : Optional[int] = hidden_states + self.drop_path(__UpperCAmelCase )
            a__ : Dict = ()
            a__ : List[Any] = self.output(self.after_norm(__UpperCAmelCase ) )
            a__ : Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
            # Second residual connection
            a__ : Optional[Any] = hidden_states + self.drop_path(__UpperCAmelCase )
            a__ : Optional[int] = (output,) + outputs
            return outputs
        else:
            a__ : Optional[int] = self.drop_path(self.pooling(self.before_norm(__UpperCAmelCase ) ) )
            # First residual connection
            a__ : Tuple = pooling_output + hidden_states
            a__ : Tuple = ()
            # Second residual connection inside the PoolFormerOutput block
            a__ : Optional[int] = self.drop_path(self.output(self.after_norm(__UpperCAmelCase ) ) )
            a__ : str = hidden_states + layer_output
            a__ : Optional[Any] = (output,) + outputs
            return outputs
class _a ( nn.Module ):
    """PoolFormer encoder: one patch-embedding + stack of layers per stage.

    NOTE(review): machine-mangled — locals and sub-modules are bound to
    throwaway ``a__`` names while later lines read ``embeddings``/``blocks``/
    ``cur``/``dpr``/``config`` etc., and ``__init__``'s parameter is never the
    name the body uses. Restore from upstream before use.
    """
    def __init__( self , __UpperCAmelCase ):
        """Build per-stage patch embeddings and layer stacks from the config."""
        super().__init__()
        a__ : Any = config
        # stochastic depth decay rule
        a__ : List[str] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
        # patch embeddings
        a__ : List[Any] = []
        for i in range(config.num_encoder_blocks ):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
        a__ : Any = nn.ModuleList(__UpperCAmelCase )
        # Transformer blocks
        a__ : Optional[int] = []
        a__ : List[Any] = 0
        for i in range(config.num_encoder_blocks ):
            # each block consists of layers
            a__ : str = []
            if i != 0:
                # Offset into the global drop-path schedule for this stage.
                cur += config.depths[i - 1]
            for j in range(config.depths[i] ):
                layers.append(
                    PoolFormerLayer(
                        __UpperCAmelCase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
            blocks.append(nn.ModuleList(__UpperCAmelCase ) )
        a__ : Any = nn.ModuleList(__UpperCAmelCase )
    def _A ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ):
        """Run all stages; optionally collect hidden states per layer."""
        a__ : int = () if output_hidden_states else None
        a__ : str = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
            a__ , a__ : Optional[Any] = layers
            # Get patch embeddings from hidden_states
            a__ : Any = embedding_layer(__UpperCAmelCase )
            # Send the embeddings through the blocks
            for _, blk in enumerate(__UpperCAmelCase ):
                a__ : List[Any] = blk(__UpperCAmelCase )
                a__ : Tuple = layer_outputs[0]
            if output_hidden_states:
                a__ : Optional[Any] = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=__UpperCAmelCase , hidden_states=__UpperCAmelCase )
class _a ( SCREAMING_SNAKE_CASE ):
    """PoolFormer base class handling weight init and checkpoint conventions.

    NOTE(review): machine-mangled — the base name ``SCREAMING_SNAKE_CASE``
    (upstream: ``PreTrainedModel``) is not defined here, the class attributes
    were all renamed to ``A`` with bogus annotations (upstream: config_class,
    base_model_prefix, main_input_name, supports_gradient_checkpointing),
    both methods read ``module``/``value`` which are never bound, and
    ``nn.Convad`` does not exist in torch. Restore from upstream.
    """
    A :Optional[int] = PoolFormerConfig
    A :List[str] = "poolformer"
    A :Tuple = "pixel_values"
    A :List[str] = True
    def _A ( self , __UpperCAmelCase ):
        """Initialize weights: truncated-style normal for linear/conv, unit LayerNorm."""
        if isinstance(__UpperCAmelCase , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(__UpperCAmelCase , nn.LayerNorm ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
    def _A ( self , __UpperCAmelCase , __UpperCAmelCase=False ):
        """Toggle gradient checkpointing on the encoder (upstream: _set_gradient_checkpointing)."""
        if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            a__ : Dict = value
lowerCamelCase = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCamelCase = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , SCREAMING_SNAKE_CASE , )
class _a ( SCREAMING_SNAKE_CASE ):
    """Bare PoolFormer model (encoder only, no task head).

    NOTE(review): machine-mangled — decorators reference ``__UpperCAmelCase``
    at class scope (undefined), ``__init__`` binds ``config``/encoder to
    throwaway locals ``a__`` instead of ``self.*``, and the forward body reads
    names (``output_hidden_states``/``return_dict``/``pixel_values``/
    ``encoder_outputs``/``sequence_output``) never bound to those locals.
    Restore from upstream before use.
    """
    def __init__( self , __UpperCAmelCase ):
        """Store the config and build the encoder, then run post-init weight setup."""
        super().__init__(__UpperCAmelCase )
        a__ : Optional[Any] = config
        a__ : int = PoolFormerEncoder(__UpperCAmelCase )
        # Initialize weights and apply final processing
        self.post_init()
    def _A ( self ):
        """Return the input (patch) embedding module."""
        return self.embeddings.patch_embeddings
    @add_start_docstrings_to_model_forward(__UpperCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def _A ( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ):
        """Forward pass: defaults flags from config, requires pixel_values, runs the encoder."""
        a__ : int = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        a__ : str = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values" )
        a__ : List[Any] = self.encoder(
            __UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase , )
        a__ : Any = encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=__UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , )
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase ):
"""simple docstring"""
super().__init__()
a__ : Any = nn.Linear(config.hidden_size , config.hidden_size )
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
a__ : Any = self.dense(__UpperCAmelCase )
return output
@add_start_docstrings(
    "\n    PoolFormer Model transformer with an image classification head on top\n    " , SCREAMING_SNAKE_CASE , )
class _a ( SCREAMING_SNAKE_CASE ):
    """PoolFormer with an image-classification head (norm + mean-pool + linear).

    NOTE(review): machine-mangled — results are bound to throwaway locals
    ``a__`` while later lines read ``outputs``/``logits``/``labels``/``loss``
    etc., and ``self.num_labels``/``self.poolformer``/``self.norm``/
    ``self.classifier`` are never actually assigned. Restore from upstream.
    """
    def __init__( self , __UpperCAmelCase ):
        """Build backbone, final norm, and classifier head; then post-init."""
        super().__init__(__UpperCAmelCase )
        a__ : Optional[Any] = config.num_labels
        a__ : int = PoolFormerModel(__UpperCAmelCase )
        # Final norm
        a__ : Any = PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head
        a__ : Dict = (
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(__UpperCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def _A ( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ):
        """Forward pass with optional labels; picks the loss by problem type."""
        a__ : str = return_dict if return_dict is not None else self.config.use_return_dict
        a__ : str = self.poolformer(
            __UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase , )
        a__ : Optional[Any] = outputs[0]
        # Normalise, global average-pool over spatial dims, then classify.
        a__ : Union[str, Any] = self.classifier(self.norm(__UpperCAmelCase ).mean([-2, -1] ) )
        a__ : List[str] = None
        if labels is not None:
            # Infer the problem type once from label dtype / label count.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    a__ : str = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    a__ : List[str] = "single_label_classification"
                else:
                    a__ : str = "multi_label_classification"
            if self.config.problem_type == "regression":
                a__ : int = MSELoss()
                if self.num_labels == 1:
                    a__ : str = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    a__ : Union[str, Any] = loss_fct(__UpperCAmelCase , __UpperCAmelCase )
            elif self.config.problem_type == "single_label_classification":
                a__ : Any = CrossEntropyLoss()
                a__ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                a__ : Union[str, Any] = BCEWithLogitsLoss()
                a__ : Dict = loss_fct(__UpperCAmelCase , __UpperCAmelCase )
        if not return_dict:
            a__ : List[Any] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=__UpperCAmelCase , logits=__UpperCAmelCase , hidden_states=outputs.hidden_states )
| 191 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
lowerCAmelCase__ = None
# NOTE(review): the obfuscation collapsed all module-level constants to the
# single name ``lowerCAmelCase__`` — each assignment clobbers the previous
# one; upstream these are logger, SPIECE_UNDERLINE, VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '▁'
lowerCAmelCase__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
    'tokenizer_file': {
        'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
    },
}
lowerCAmelCase__ = {
    'google/pegasus-xsum': 512,
}
class snake_case ( __lowercase ):
    """Fast Pegasus tokenizer backed by a ``tokenizers`` tokenizer.json.

    NOTE(review): machine-mangled — ``__init__`` repeats the parameter name
    ``SCREAMING_SNAKE_CASE_`` nine times (a SyntaxError), locals are collapsed
    to ``SCREAMING_SNAKE_CASE_`` while later lines read ``offset``/
    ``additional_special_tokens``/``mask_token_sent``/``vocab_file`` etc., and
    ``additional_special_tokens_extended`` is used before it is (visibly)
    assigned. The class attributes below also all share one mangled name.
    Restore from upstream before use.
    """
    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = PegasusTokenizer
    UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
    def __init__(self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<mask_2>" , SCREAMING_SNAKE_CASE_="<mask_1>" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1_03 , **SCREAMING_SNAKE_CASE_ , ):
        """Validate/extend the additional special tokens, then defer to the fast base class."""
        SCREAMING_SNAKE_CASE_ = offset
        if additional_special_tokens is not None:
            if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
                raise TypeError(
                    f'additional_special_tokens should be of type {type(SCREAMING_SNAKE_CASE_ )}, but is'
                    f' {type(SCREAMING_SNAKE_CASE_ )}' )
            # Ensure the sentence-mask token leads the list exactly once.
            SCREAMING_SNAKE_CASE_ = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'<unk_{i}>' for i in range(len(SCREAMING_SNAKE_CASE_ ) , self.offset - 1 )
            ]
            if len(set(SCREAMING_SNAKE_CASE_ ) ) != len(SCREAMING_SNAKE_CASE_ ):
                raise ValueError(
                    '''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
                    f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
            SCREAMING_SNAKE_CASE_ = additional_special_tokens_extended
        else:
            SCREAMING_SNAKE_CASE_ = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )]
        super().__init__(
            SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , mask_token_sent=SCREAMING_SNAKE_CASE_ , offset=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        SCREAMING_SNAKE_CASE_ = vocab_file
        SCREAMING_SNAKE_CASE_ = False if not self.vocab_file else True
    def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
        """Return a 0/1 special-token mask for a sequence of token ids."""
        SCREAMING_SNAKE_CASE_ = set(self.all_special_ids ) # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                '''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
                f' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' )
        return [1 if x in all_special_ids else 0 for x in seq]
    def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ):
        """get_special_tokens_mask: add the trailing EOS position to the mask."""
        if already_has_special_tokens:
            return self._special_token_mask(SCREAMING_SNAKE_CASE_ )
        elif token_ids_a is None:
            return self._special_token_mask(SCREAMING_SNAKE_CASE_ ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
    def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
        """build_inputs_with_special_tokens: append EOS; pairs are concatenated."""
        if token_ids_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a + [self.eos_token_id]
    def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
        """save_vocabulary: copy the sentencepiece model file into the target directory."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        SCREAMING_SNAKE_CASE_ = os.path.join(
            SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
            copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
        return (out_vocab_file,) | 628 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __lowercase , unittest.TestCase ):
    """Unit tests for the CTRL BPE tokenizer using a tiny fixture vocab.

    NOTE(review): machine-mangled — locals are collapsed to
    ``SCREAMING_SNAKE_CASE_`` while later lines read ``self.vocab_file``/
    ``self.merges_file``/``kwargs``/``tokenizer``/``tokens`` etc., which are
    never actually assigned. Restore the original bindings from upstream.
    """
    UpperCAmelCase__ = CTRLTokenizer
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    def _lowercase (self ):
        """setUp: write a minimal vocab.json and merges.txt into the tmp dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        SCREAMING_SNAKE_CASE_ = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
        SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
        SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
    def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
        """Build a tokenizer from the fixture files with the test special tokens."""
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
    def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
        """Return a (input_text, expected_output_text) pair for round-trip tests."""
        SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
        SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
        return input_text, output_text
    def _lowercase (self ):
        """Check tokenization and id conversion against hand-computed BPE output."""
        SCREAMING_SNAKE_CASE_ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
        SCREAMING_SNAKE_CASE_ = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        SCREAMING_SNAKE_CASE_ = tokens + [tokenizer.unk_token]
        SCREAMING_SNAKE_CASE_ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) | 628 | 1 |
"""simple docstring"""
import math
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[str]:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 0.1 ) -> List[Any]:
'''simple docstring'''
lowercase_ = 3
lowercase_ = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_a )
j += 2
return j
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 567 |
def A__ ( _a : list ):
    """Sort a list of non-negative integers in place using bead sort (gravity sort).

    Args:
        _a: list of non-negative integers; it is mutated in place.

    Returns:
        The same list object, sorted in ascending order.

    Raises:
        TypeError: if any element is not a non-negative integer.
    """
    if any(not isinstance(x , int ) or x < 0 for x in _a ):
        raise TypeError("""Sequence must be list of non-negative integers""" )
    # Each pass lets excess "beads" fall from a rod to the next one;
    # after len(_a) passes the configuration is stable (sorted).
    for _ in range(len(_a ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(_a , _a[1:] ) ):
            if rod_upper > rod_lower:
                # Move the surplus beads from the upper rod down to the lower one.
                _a[i] -= rod_upper - rod_lower
                _a[i + 1] += rod_upper - rod_lower
    return _a
if __name__ == "__main__":
    # Smoke tests. The sorter defined in this module is named ``A__`` (not
    # ``bead_sort``), so call it by its actual name — the previous calls to
    # an undefined ``bead_sort`` raised NameError when run as a script.
    assert A__([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert A__([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 385 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 527 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the TableTransformer subpackage: heavy torch
# modules are only imported on first attribute access via ``_LazyModule``.
# NOTE(review): machine-mangled — every module-level name was collapsed to
# ``a``, so the list assigned in the torch branch overwrites the import
# structure dict instead of extending it, and ``_import_structure`` passed to
# ``_LazyModule`` below is undefined. Upstream uses ``_import_structure``.
a : List[str] = {
    '''configuration_table_transformer''': [
        '''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TableTransformerConfig''',
        '''TableTransformerOnnxConfig''',
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply skip registering the modeling objects.
    pass
else:
    a : Union[str, Any] = [
        '''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TableTransformerForObjectDetection''',
        '''TableTransformerModel''',
        '''TableTransformerPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Static type-checkers see real imports; runtime goes through _LazyModule.
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )
else:
    import sys
    a : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 527 | 1 |
import collections
import importlib.util
import os
import re
from pathlib import Path
# Patterns used to parse `__init__.py` files for the custom-init checker.
# NOTE(review): machine-mangled — every constant below was renamed to the
# single identifier ``A``, so each assignment clobbers the previous one and
# the regexes referenced later (``_re_backend``, ``_re_test_backend``, ...)
# are undefined. Upstream each pattern has its own ``_re_*`` name.
A = 'src/transformers'
# Matches is_xxx_available()
A = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
A = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
A = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
A = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
A = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
A = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
A = re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
A = re.compile('^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
A = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
A = re.compile(r'^\s*try:')
# Catches a line with else:
A = re.compile(r'^\s*else:')
def lowerCamelCase ( UpperCamelCase ):
    """Return the normalized backend name for an ``if not is_xxx_available()`` guard line.

    Args:
        UpperCamelCase: one source line from an ``__init__.py`` being inspected.

    Returns:
        ``None`` when the line is not a backend guard; otherwise the backend
        names found on the line, sorted and joined with ``"_and_"`` (e.g.
        ``"tf_and_torch"``).

    Fixes: the result list was previously bound to a throwaway local while the
    code sorted an undefined ``backends`` and joined the *input line* instead
    of the collected names; bogus type annotations (unimported ``Optional``)
    have been dropped.
    """
    if _re_test_backend.search(UpperCamelCase ) is None:
        return None
    # A single guard may test several backends, e.g.
    # `if not is_tf_available() and not is_torch_available():`.
    backends = [b[0] for b in _re_backend.findall(UpperCamelCase )]
    backends.sort()
    return "_and_".join(backends )
def parse_init(init_file):
    """Parse a lazy ``__init__.py``.

    Returns a pair of dicts mapping backend name (or ``"none"``) to the list of
    objects declared, one dict for the ``_import_structure`` half and one for
    the ``TYPE_CHECKING`` half, or ``None`` for a traditional (non-lazy) init.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves returned by ``parse_init``.

    Returns a list of human-readable error strings (empty when the
    ``_import_structure`` and ``TYPE_CHECKING`` halves agree).
    """

    def find_duplicates(seq):
        # Objects listed more than once in the same backend section.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk the transformers source tree and validate every lazy ``__init__.py``.

    Raises ValueError with a combined report if any init's two halves disagree.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file for context.
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def check_all_submodules():
    """Deprecated alias kept out; see ``get_transformers_submodules`` below."""


def get_transformers_submodules():
    """Return the list of transformers submodules (dotted paths) found on disk."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only top-level modules (no dots after stripping the extension).
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules that are deliberately absent from the main init's
# `_import_structure` (conversion/utility scripts, not public API).
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Check every submodule on disk is registered in the main transformers init.

    Raises ValueError listing any submodule missing from `_import_structure`.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
    # Each check raises ValueError with a detailed report on failure.
    check_all_inits()
    check_submodules()
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of words from `words`.

    Builds a character trie over `words`, then memoized DP over start indices.

    :param string: non-empty string to segment
    :param words: list of non-empty candidate words
    :raises ValueError: on an empty string or an invalid word list
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie; a terminal node is marked with the WORD_KEEPER key.
    trie: dict = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        """True if string[index:] can be split into trie words."""
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
    # Run any doctest examples embedded in this module.
    import doctest

    doctest.testmod()
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    """Tests for OwlViTProcessor (a CLIP tokenizer paired with an OwlViT image processor)."""

    def setUp(self):
        # Write a tiny BPE vocab/merges pair and an image-processor config to a
        # temp dir so `from_pretrained(self.tmpdirname)` works offline.
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random RGB PIL image (30x400)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 126 |
'''simple docstring'''
class Node:
    """A binary search tree node holding a single value."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        """Insert `val` into the subtree rooted at this node, keeping BST order.

        Equal values overwrite the current node's value, so the tree stays
        duplicate-free.
        """
        # NOTE(review): a falsy root value (0, None, "") makes this a no-op —
        # original behavior preserved; confirm callers never store falsy keys.
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
            else:
                self.val = val
def inorder(root, res):
    """Append the values of the BST rooted at `root` to `res` in sorted (in-order) order.

    :param root: a node with ``left``/``val``/``right`` attributes, or None
    :param res: list mutated in place
    """
    # Recursive traversal: left subtree, node, right subtree.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)
def tree_sort(arr):
    """Sort `arr` by inserting every element into a BST and reading it back in order.

    Note: duplicates collapse to a single occurrence because ``Node.insert``
    overwrites equal values.

    :param arr: sequence of comparable values
    :return: a new sorted list (or `arr` itself when empty)
    """
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
    # Expected output: [1, 2, 3, 9, 10, 13, 14]
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 126 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to a small SentencePiece model shipped with the test fixtures.
# Renamed from a dunder-prefixed name: referencing `__snake_case` inside a
# class body would trigger private name mangling and raise NameError.
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for XLM-RoBERTa (slow SentencePiece and fast tokenizers)."""

    # Attributes consumed by TokenizerTesterMixin.
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #    ^ unk: 2 + 1 = 3         unk: 2 + 1 = 3  ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            0,
            3293,
            83,
            10,
            4552,
            4989,
            7986,
            678,
            10,
            5915,
            111,
            179459,
            124850,
            4,
            6044,
            237,
            12,
            6,
            5,
            6,
            4,
            6780,
            705,
            15,
            1388,
            44,
            378,
            10114,
            711,
            152,
            20,
            6,
            5,
            22376,
            642,
            1221,
            15190,
            34153,
            450,
            5608,
            959,
            1119,
            57702,
            136,
            186,
            47,
            1098,
            29367,
            47,
            # 4426, # What fairseq tokenizes from "<unk>": "_<"
            # 3678, # What fairseq tokenizes from "<unk>": "unk"
            # 2740, # What fairseq tokenizes from "<unk>": ">"
            3,  # What we tokenize from "<unk>": "<unk>"
            6,  # Residue from the tokenization: an extra sentencepiece underline
            4,
            6044,
            237,
            6284,
            50901,
            528,
            31,
            90,
            34,
            927,
            2,
        ]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'''input_ids''': [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
| 658 |
'''simple docstring'''
class Things:
    """An item with a name, a value and a weight, used by the greedy knapsack."""

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        # Value density, a typical greedy ranking key.
        return self.value / self.weight
def build_menu(name, value, weight):
    """Create a list of Things from parallel name/value/weight sequences.

    All three sequences are assumed to have the same length.
    """
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def greedy(item, max_cost, key_func):
    """Greedy knapsack: take items in descending `key_func` order while they fit.

    :param item: iterable of objects exposing ``get_weight()`` and ``get_value()``
    :param max_cost: maximum total weight allowed
    :param key_func: ranking key (e.g. value/weight density)
    :return: tuple (chosen_items, total_value); not guaranteed optimal
    """
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy():
    """Placeholder for doctest-based checks of `greedy`.

    NOTE(review): the doctest body was stripped from this copy; restore the
    worked example before relying on `doctest.testmod()` below.
    """
if __name__ == "__main__":
    # Run any doctest examples defined in this module.
    import doctest

    doctest.testmod()
| 119 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# MT5 reuses the T5 tokenizer implementations; fall back to the dummy objects
# when the optional backends are missing so importing still succeeds and a
# helpful error is raised only on use.
if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

# BUG FIX: these aliases were previously bound to the throwaway name
# `_lowerCAmelCase` (and immediately clobbered), while `extra_objects` at the
# bottom of this file references `MTaTokenizer` / `MTaTokenizerFast` — so
# importing the package raised NameError.  Bind the referenced names instead.
MTaTokenizer = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

MTaTokenizerFast = TaTokenizerFast

# Lazy-import structure: maps submodule name -> public names it provides.
# BUG FIX: the structure dict was repeatedly reassigned (losing earlier
# entries) and the final `_LazyModule(...)` call referenced an undefined
# `_import_structure`; build the dict incrementally instead.
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
    import sys

    # Replace this module with a lazy proxy; the tokenizer aliases stay
    # eagerly available through `extra_objects`.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
        module_spec=__spec__,
    )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps submodule name -> public names it provides.
# BUG FIX: the model-name list was previously assigned to the same throwaway
# variable as the structure dict (destroying it), and the final
# `_LazyModule(...)` call referenced an undefined `_import_structure`.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 | 0 |
'''simple docstring'''
import cmath
import math
def _SCREAMING_SNAKE_CASE(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Return the complex apparent power `S = V * I` of a single-phase AC circuit.

    Fixes vs. the original: all four parameters were named `__snake_case`
    (a SyntaxError) and every local was bound to `_A`.  The parameter order
    (voltage, current, voltage_angle, current_angle) follows the standard
    apparent-power formula — confirm against callers; angles are in degrees.
    """
    # Convert the phase angles from degrees to radians.
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current from polar to rectangular (complex) form.
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Apparent power is the product of the two phasors.
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[int], UpperCamelCase__ : Dict, UpperCamelCase__ : Dict=13, UpperCamelCase__ : Optional[Any]=7, UpperCamelCase__ : List[str]=True, UpperCamelCase__ : Union[str, Any]=True, UpperCamelCase__ : Optional[int]=True, UpperCamelCase__ : Optional[Any]=True, UpperCamelCase__ : Dict=99, UpperCamelCase__ : Dict=32, UpperCamelCase__ : Any=2, UpperCamelCase__ : Optional[int]=4, UpperCamelCase__ : Tuple=37, UpperCamelCase__ : Union[str, Any]="gelu", UpperCamelCase__ : Optional[Any]=0.1, UpperCamelCase__ : Any=0.1, UpperCamelCase__ : Union[str, Any]=5_12, UpperCamelCase__ : Optional[Any]=16, UpperCamelCase__ : List[str]=2, UpperCamelCase__ : List[Any]=0.02, UpperCamelCase__ : List[str]=3, UpperCamelCase__ : Optional[Any]=4, UpperCamelCase__ : Optional[Any]=None, UpperCamelCase__ : Union[str, Any]=0, ) -> str:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
_A = projection_dim
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
_A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
_A = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size], self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
_A = ids_tensor([self.batch_size], self.num_choices )
_A = BertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=UpperCamelCase__, initializer_range=self.initializer_range, )
_A = DPRConfig(projection_dim=self.projection_dim, **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : Any, UpperCamelCase__ : Tuple, UpperCamelCase__ : List[Any], UpperCamelCase__ : Any, UpperCamelCase__ : List[Any], UpperCamelCase__ : Union[str, Any] ) -> int:
_A = TFDPRContextEncoder(config=UpperCamelCase__ )
_A = model(UpperCamelCase__, attention_mask=UpperCamelCase__, token_type_ids=UpperCamelCase__ )
_A = model(UpperCamelCase__, token_type_ids=UpperCamelCase__ )
_A = model(UpperCamelCase__ )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size) )
def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : int, UpperCamelCase__ : List[Any], UpperCamelCase__ : List[str], UpperCamelCase__ : List[Any], UpperCamelCase__ : Tuple, UpperCamelCase__ : str, UpperCamelCase__ : str ) -> int:
_A = TFDPRQuestionEncoder(config=UpperCamelCase__ )
_A = model(UpperCamelCase__, attention_mask=UpperCamelCase__, token_type_ids=UpperCamelCase__ )
_A = model(UpperCamelCase__, token_type_ids=UpperCamelCase__ )
_A = model(UpperCamelCase__ )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size) )
def __UpperCAmelCase ( self : int, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : List[str], UpperCamelCase__ : Tuple, UpperCamelCase__ : List[str], UpperCamelCase__ : Tuple, UpperCamelCase__ : Optional[int], UpperCamelCase__ : Optional[int] ) -> Any:
_A = TFDPRReader(config=UpperCamelCase__ )
_A = model(UpperCamelCase__, attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,) )
def __UpperCAmelCase ( self : Dict ) -> Dict:
_A = self.prepare_config_and_inputs()
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = config_and_inputs
_A = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class lowercase_ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
__lowerCAmelCase = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
_A = TFDPRModelTester(self )
_A = ConfigTester(self, config_class=UpperCamelCase__, hidden_size=37 )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*UpperCamelCase__ )
def __UpperCAmelCase ( self : int ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*UpperCamelCase__ )
def __UpperCAmelCase ( self : int ) -> Tuple:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*UpperCamelCase__ )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFDPRContextEncoder.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFDPRContextEncoder.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFDPRQuestionEncoder.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFDPRReader.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_tf
class lowercase_ ( unittest.TestCase ):
    """Slow integration test pinning TFDPRQuestionEncoder output values.

    NOTE(review): the method's locals are all bound to `_A`, yet later lines
    read `model`, `output` and `expected_slice` — those names are undefined as
    written; the intended bindings need to be restored before this test can run.
    """
    @slow
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
        # Load the pretrained single-NQ DPR question encoder.
        _A = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
        # Token ids for the probe sentence below.
        _A = tf.constant(
            [[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] ) # [CLS] hello, is my dog cute? [SEP]
        # NOTE(review): `UpperCamelCase__` is undefined here — presumably the
        # input ids tensor above was meant; confirm.
        _A = model(UpperCamelCase__ )[0] # embedding shape = (1, 768)
        # compare the actual values for a slice.
        _A = tf.constant(
            [
                [
                    0.03_236_253,
                    0.12_753_335,
                    0.16_818_509,
                    0.00_279_786,
                    0.3_896_933,
                    0.24_264_945,
                    0.2_178_971,
                    -0.02_335_227,
                    -0.08_481_959,
                    -0.14_324_117,
                ]
            ] )
        # First ten embedding dimensions must match the recorded reference.
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4 ) )
| 107 | 1 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_UpperCAmelCase : List[str] = logging.getLogger(__name__)
class lowerCAmelCase_ ( snake_case__ ):
UpperCamelCase_ :List[Any] = 'sequence-classification'
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
if type(SCREAMING_SNAKE_CASE_ ) == dict:
lowerCAmelCase__ = Namespace(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = glue_output_modes[hparams.task]
lowerCAmelCase__ = glue_tasks_num_labels[hparams.task]
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.mode )
def __snake_case ( self : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
return self.model(**SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] ):
lowerCAmelCase__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase__ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
lowerCAmelCase__ = self(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = outputs[0]
lowerCAmelCase__ = self.trainer.lr_schedulers[0]['''scheduler''']
lowerCAmelCase__ = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __snake_case ( self : List[Any] ):
lowerCAmelCase__ = self.hparams
lowerCAmelCase__ = processors[args.task]()
lowerCAmelCase__ = processor.get_labels()
for mode in ["train", "dev"]:
lowerCAmelCase__ = self._feature_file(SCREAMING_SNAKE_CASE_ )
if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , SCREAMING_SNAKE_CASE_ )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
lowerCAmelCase__ = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
lowerCAmelCase__ = convert_examples_to_features(
SCREAMING_SNAKE_CASE_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , SCREAMING_SNAKE_CASE_ )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : bool = False ):
lowerCAmelCase__ = '''dev''' if mode == '''test''' else mode
lowerCAmelCase__ = self._feature_file(SCREAMING_SNAKE_CASE_ )
logger.info('''Loading features from cached file %s''' , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = torch.load(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCAmelCase__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
lowerCAmelCase__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase__ = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase__ = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , batch_size=SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_ , )
def __snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any ):
lowerCAmelCase__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase__ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
lowerCAmelCase__ = self(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ = outputs[:2]
lowerCAmelCase__ = logits.detach().cpu().numpy()
lowerCAmelCase__ = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase__ = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
lowerCAmelCase__ = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase__ = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase__ = np.squeeze(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
lowerCAmelCase__ = [[] for _ in range(out_label_ids.shape[0] )]
lowerCAmelCase__ = [[] for _ in range(out_label_ids.shape[0] )]
lowerCAmelCase__ = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}
lowerCAmelCase__ = dict(results.items() )
lowerCAmelCase__ = results
return ret, preds_list, out_label_list
def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : list ):
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._eval_end(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ):
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._eval_end(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
BaseTransformer.add_model_specific_args(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=SCREAMING_SNAKE_CASE_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=SCREAMING_SNAKE_CASE_ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def lowerCAmelCase_ ():
    """Parse CLI arguments, fine-tune a GLUE model, and optionally run test predictions.

    Fixes vs. the original: the body assigned every local to `lowerCAmelCase__`
    while reading the undefined name `lowercase__` (including
    `glob.glob(..., recursive=lowercase__)`, now `recursive=True`).

    NOTE(review): `GLUETransformer` is referenced here but the class defined in
    this file is named `lowerCAmelCase_`; the reference is kept as in the
    original pending a rename of the class.
    """
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a timestamped folder is generated under ./results.
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f'{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}',
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set with the latest checkpoint and write to output_dir.
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 288 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCAmelCase_ ( snake_case__ ):
    """Dataset reader that materializes a Spark DataFrame as a `datasets` dataset.

    NOTE(review): every ``__init__`` parameter shares the name
    ``SCREAMING_SNAKE_CASE_`` (a SyntaxError as written) and the attribute
    assignments bind a local instead of ``self``; this block is
    documentation-only — the code is reproduced unchanged.
    """
    def __init__( self : str , SCREAMING_SNAKE_CASE_ : pyspark.sql.DataFrame , SCREAMING_SNAKE_CASE_ : Optional[NamedSplit] = None , SCREAMING_SNAKE_CASE_ : Optional[Features] = None , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : str = "arrow" , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ):
        # Forward split/features/cache options to the AbstractDatasetReader base.
        super().__init__(
            split=SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ , streaming=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        # NOTE(review): `load_from_cache_file` / `file_format` are not defined
        # under these parameter names — confirm the intended parameter naming.
        lowerCAmelCase__ = load_from_cache_file
        lowerCAmelCase__ = file_format
        # Build the Spark dataset builder backing this reader.
        lowerCAmelCase__ = Spark(
            df=SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , working_dir=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
    def __snake_case ( self : List[str] ):
        """Download/prepare (or stream) the dataset and return the requested split."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        # Non-streaming path: force a re-download unless the cache may be reused.
        lowerCAmelCase__ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=SCREAMING_SNAKE_CASE_ , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
| 288 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
_lowerCamelCase = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def SCREAMING_SNAKE_CASE(input_text, convert_value=None, default=None, error_message=None):
    """Prompt with `input_text` until the answer is accepted.

    Returns `default` when the user enters nothing and a default exists;
    otherwise the raw answer, passed through `convert_value` when provided.
    On conversion failure, prints `error_message` (if any) and re-asks.

    Fixes vs. the original: the four parameters were all named
    `__UpperCamelCase` (a SyntaxError) and the body read the undefined name
    `lowerCAmelCase_` for each of them.
    """
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def SCREAMING_SNAKE_CASE(input_text, options=None, convert_value=None, default_choice=0):
    """Show a bullet menu for `options` and return the (optionally converted) choice.

    Fixes vs. the original: the parameters were all named `__UpperCamelCase`
    (a SyntaxError), the body read the undefined `lowerCAmelCase_`, and
    `options` defaulted to a shared mutable list (`=[]`) — now a per-call list.
    """
    menu = BulletMenu(input_text, [] if options is None else options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def SCREAMING_SNAKE_CASE(value):
    """Map a menu index to a ComputeEnvironment member.

    (These converters previously read the undefined names `lowerCAmelCase_`
    and `value` instead of their own parameter; fixed throughout.  They all
    intentionally reuse the name SCREAMING_SNAKE_CASE, matching the original.)
    """
    value = int(value)
    return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value])


def SCREAMING_SNAKE_CASE(value):
    """Map a menu index to a DistributedType member."""
    value = int(value)
    return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value])


def SCREAMING_SNAKE_CASE(value):
    """Map a menu index to a DynamoBackend value string."""
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def SCREAMING_SNAKE_CASE(value):
    """Map a menu index to a PrecisionType member."""
    value = int(value)
    return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value])


def SCREAMING_SNAKE_CASE(value):
    """Map a menu index to a SageMakerDistributedType member."""
    value = int(value)
    return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value])
def SCREAMING_SNAKE_CASE(value):
    """Convert a case-insensitive 'yes'/'no' answer to a bool.

    Fixes vs. the original: the body read the undefined name `value` while the
    parameter was called `__UpperCamelCase`; the parameter now carries the
    name the body uses.  Raises KeyError on any other answer.
    """
    return {"yes": True, "no": False}[value.lower()]
class a ( argparse.RawDescriptionHelpFormatter ):
    """Help formatter that strips the '<command> [<args>]' stub from usage lines.

    Fixes vs. the original: the method's four parameters were all named
    `__snake_case` (a SyntaxError) and the body read the undefined name
    `_lowerCAmelCase` instead of its arguments.
    """

    def lowerCamelCase_(self, usage, actions, groups, prefix):
        # Delegate to argparse's formatter, then drop the subcommand stub.
        formatted = super()._format_usage(usage, actions, groups, prefix)
        formatted = formatted.replace('''<command> [<args>] ''', '''''')
        return formatted
| 144 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class __lowerCAmelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ):
    """Learning-rate schedule with polynomial warmup followed by a decay schedule.

    For the first `warmup_steps` steps the rate ramps as
    `init_lr * (step / warmup_steps) ** power`; afterwards `decay_schedule_fn`
    takes over, offset by the warmup length.

    Fixes vs. the original: all `__init__` parameters were named
    `_lowerCAmelCase` (a SyntaxError), the attribute assignments read
    undefined names, and `tf.floataa` is not a real dtype (now `tf.float32`).
    """

    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp" ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def lowerCAmelCase__(self):
        """Return the schedule configuration (keras serialization hook)."""
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def _lowerCAmelCase(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: float = None,
    adam_global_clipnorm: float = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: list = None,
):
    """Create an optimizer and its learning-rate schedule (warmup + polynomial decay).

    Fixes vs. the original: every parameter was named `lowerCAmelCase_`
    (a SyntaxError) and the Adam constructors were passed the keyword
    `beta_a` twice (also a SyntaxError) — keras Adam expects `beta_1`/`beta_2`.

    NOTE(review): `WarmUp` and `AdamWeightDecay` are referenced as in the
    original, but the classes defined in this file are both named
    `__lowerCAmelCase`; a class rename is needed for these names to resolve.
    """
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class __lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self : Dict , _lowerCAmelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , _lowerCAmelCase : float = 0.9 , _lowerCAmelCase : float = 0.999 , _lowerCAmelCase : float = 1e-7 , _lowerCAmelCase : bool = False , _lowerCAmelCase : float = 0.0 , _lowerCAmelCase : Optional[List[str]] = None , _lowerCAmelCase : Optional[List[str]] = None , _lowerCAmelCase : str = "AdamWeightDecay" , **_lowerCAmelCase : Union[str, Any] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
snake_case_ = weight_decay_rate
snake_case_ = include_in_weight_decay
snake_case_ = exclude_from_weight_decay
@classmethod
def lowerCAmelCase__ ( cls : Dict , _lowerCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
snake_case_ = {"WarmUp": WarmUp}
return super(_lowerCAmelCase , cls ).from_config(_lowerCAmelCase , custom_objects=_lowerCAmelCase )
def lowerCAmelCase__ ( self : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
super(_lowerCAmelCase , self )._prepare_local(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
snake_case_ = tf.constant(
self.weight_decay_rate , name="adam_weight_decay_rate" )
def lowerCAmelCase__ ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] ) -> int:
"""simple docstring"""
snake_case_ = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] , use_locking=self._use_locking , )
return tf.no_op()
def lowerCAmelCase__ ( self : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ , snake_case_ = list(zip(*_lowerCAmelCase ) )
return super(_lowerCAmelCase , self ).apply_gradients(zip(_lowerCAmelCase , _lowerCAmelCase ) , name=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
snake_case_ = apply_state or {}
snake_case_ = apply_state.get((var_device, var_dtype) )
if coefficients is None:
snake_case_ = self._fallback_apply_state(_lowerCAmelCase , _lowerCAmelCase )
snake_case_ = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowerCAmelCase__ ( self : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple=None ) -> List[str]:
"""simple docstring"""
snake_case_ , snake_case_ = self._get_lr(var.device , var.dtype.base_dtype , _lowerCAmelCase )
snake_case_ = self._decay_weights_op(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
with tf.control_dependencies([decay] ):
return super(_lowerCAmelCase , self )._resource_apply_dense(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple=None ) -> List[str]:
"""simple docstring"""
snake_case_ , snake_case_ = self._get_lr(var.device , var.dtype.base_dtype , _lowerCAmelCase )
snake_case_ = self._decay_weights_op(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
with tf.control_dependencies([decay] ):
return super(_lowerCAmelCase , self )._resource_apply_sparse(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
snake_case_ = super().get_config()
config.update({"weight_decay_rate": self.weight_decay_rate} )
return config
def _do_use_weight_decay(self, param_name):
    """Return whether weight decay applies to the variable named `param_name`.

    Include patterns win over exclude patterns; with a zero decay rate
    nothing is decayed.
    """
    if self.weight_decay_rate == 0:
        return False
    if self._include_in_weight_decay:
        for r in self._include_in_weight_decay:
            # The original searched the parameter name against itself,
            # making the include/exclude patterns dead code.
            if re.search(r, param_name) is not None:
                return True
    if self._exclude_from_weight_decay:
        for r in self._exclude_from_weight_decay:
            if re.search(r, param_name) is not None:
                return False
    return True
class __lowerCAmelCase(object):
    """Gradient accumulator for multi-step training.

    Call the instance with a list of per-variable gradients to add them,
    read them back via `.gradients`, and `.reset()` between optimizer steps.
    """

    def __init__(self):
        """Initializes the accumulator with no gradients and no step counter."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps (the counter is created lazily)."""
        if self._accum_steps is None:
            # The original assigned the Variable to a throwaway local, so
            # self._accum_steps stayed None and .value() raised.
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradient values."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients`, creating the backing variables on first use."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        # zeros_like must mirror each individual gradient,
                        # not the whole incoming list.
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients and the step counter to zero."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
| 283 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __lowercase(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for BlenderbotSmallTokenizer over a tiny toy vocabulary."""

    # The original bound two different values to one class attribute;
    # the mixin expects these two names.
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Write a minimal vocab + BPE merges file into the mixin's temp dir.
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1_384]
        src_text = "I am a small frog."
        # NOTE(review): the original boolean kwargs were lost to mangling;
        # padding=False / truncation=True restores the conventional call.
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        # The trailing "." of a sentence must tokenize like a lone ".".
        assert encoded[-1] == encoded_dot[0]
| 717 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __lowercase(unittest.TestCase):
    """Integration test for the DiT document-image-classification checkpoint."""

    @slow
    def test_for_image_classification(self):
        # The original used one undefined placeholder name for the device,
        # the inputs, the expected shape and the expected slice.
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")
        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # RVL-CDIP has 16 document classes.
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.41_58, -0.40_92, -0.43_47], device=torch_device, dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 6 | 0 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

# Released DETR checkpoints and their hosted configuration files.
# (Both constants were bound to one mangled name; the class body below
# references `logger`, which was otherwise undefined.)
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class lowercase_(PretrainedConfig):
    """Configuration for a DETR (DEtection TRansformer) model.

    Stores the backbone choice, transformer sizes, and the Hungarian-matcher
    and loss coefficients. Defaults match the original mangled signature.
    """

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        # The original collapsed every `self.x = x` into one throwaway
        # local, leaving the config object with no attributes at all.
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a DETR config from a backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a python dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for DETR.

    Renamed: the original reused the config class's own name (shadowing it)
    and gave all three properties one colliding method name.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the image tensor and its padding mask.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 41 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: each submodule maps to the public names it provides.
# (The original bound every piece to one variable `A`, so the
# `_import_structure` consumed by _LazyModule below was never defined.)
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]

if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 160 | 0 |
def solution(limit: int = 50000000) -> int:
    """Project Euler 87: count numbers below `limit` expressible as
    p**2 + q**3 + r**4 with p, q, r prime.

    >>> solution(50)
    4
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers up to sqrt(limit - 24).
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        # BUG FIX: the step must be p — the original stepped by `limit`,
        # removing only p*p and leaving composites in the "prime" set.
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # Iterate in sorted order so the early-break conditions below are valid
    # (set iteration order is arbitrary).
    sorted_primes = sorted(primes)
    for prime_a in sorted_primes:
        square = prime_a * prime_a
        for prime_b in sorted_primes:
            cube = prime_b * prime_b * prime_b
            # 16 = 2**4 is the smallest possible fourth-power term.
            if square + cube >= limit - 16:
                break
            for prime_c in sorted_primes:
                tetr = prime_c * prime_c * prime_c * prime_c
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
| 712 |
def __SCREAMING_SNAKE_CASE(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative ints as a "0b..." string.

    The original repeated one parameter name twice (SyntaxError).

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]

    max_len = max(len(a_binary), len(b_binary))

    # A result bit is 1 when either zero-padded operand has a 1 there.
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 675 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> HF Wav2Vec2 parameter path
# ("*" is replaced with the layer index at conversion time).
# The conversion functions below reference these by the names
# `MAPPING` / `TOP_LEVEL_KEYS`; the original bound both to one variable.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
# HF attributes that live at the top level of the model
# (i.e. NOT nested under the "wav2vec2." prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]
def read_txt_into_dict(filename):
    """Read a label file, mapping each non-empty line's index to its first word.

    Used to build the id2label mapping for sequence classification.
    The original never wrote into the result dict and returned the last
    value instead of the mapping.
    """
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy one fairseq tensor into the HF module addressed by dotted `key`.

    `weight_type` selects which attribute (weight / weight_g / weight_v /
    bias / adapter "param") receives the value; shapes are validated first.
    The original collapsed every tensor write into a throwaway local, so no
    weight was ever copied.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}"""
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    """Record one converted tensor in `hf_dict` under its final HF key.

    Mirrors set_recursively() but writes into a plain state dict instead of
    a live model. The original dropped the final dict write.
    """
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    # lm_head tensors are stored whole; others are indexed — presumably
    # fairseq wraps them in a length-1 container (verify against caller).
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
# fairseq adapter parameter names -> HF adapter parameter paths.
# Referenced as PARAM_MAPPING by the conversion helpers above/below;
# the original bound it to a mangled placeholder name.
PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    """Try to place one fairseq tensor into the HF model (or state dict).

    Returns True if the tensor matched an entry in MAPPING and was handled.
    The original repeated one parameter name four times (SyntaxError).
    """
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                # Resolve the layer index from the fairseq name.
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Walk the fairseq state dict and copy every tensor into `hf_model`.

    Conv feature-extractor tensors take a dedicated path; everything else
    goes through load_wavaveca_layer(). Unmatched names are logged.
    """
    # NOTE(review): `is_headless` is accepted for signature compatibility
    # with the caller but not consulted here — confirm against upstream.
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv-feature-extractor tensor into the HF feature extractor.

    `full_name` looks like "...conv_layers.<layer_id>.<type_id>...".
    type 0 = the convolution, type 2 = its norm (only layer 0 with group
    norm). Names that cannot be placed are appended to `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Convert a fairseq wav2vec2 checkpoint into the HF format on disk.

    Builds the right HF model class (CTC / pretraining / sequence
    classification), copies the fairseq weights over, and saves the model
    plus tokenizer/feature-extractor artifacts into the dump folder.
    """
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        # id2label is read from a plain text file, one label per line.
        idalabel = read_txt_into_dict(dict_path)
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=1_6000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=1_6000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: the original bound the parser and parsed args to
    # mangled names while referencing `parser`/`args` (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
| 33 | """simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCamelCase(TestCase):
    """Static checks over dataset scripts: `open` must pass an encoding and
    no raw `print(` calls may remain."""

    def _no_encoding_on_file_open(self, filepath: str):
        """Return a match for an `open(...)` call lacking `encoding=` (None if clean)."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Return a match for a real `print(` call, ignoring comments and doc-strings."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            all_matches = regexp.finditer(input_text)
            real_matches = [match for match in all_matches if match is not None and match.group(1) is not None]
        return real_matches[0] if real_matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""")
| 159 | 0 |
def binomial_coefficient(n: int, k: int) -> int:
    """Return C(n, k) via the multiplicative formula (exact integer math).

    Renamed to match the call site below; the original repeated one
    parameter name twice (SyntaxError).
    """
    result = 1  # running value of the product
    # Since C(n, k) = C(n, n - k), iterate over the smaller of the two.
    if k > (n - k):
        k = n - k
    # Multiply then divide at each step so the quotient stays integral.
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the `node_count`-th Catalan number: C(2n, n) // (n + 1)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def factorial(n: int) -> int:
    """Return n! for non-negative n.

    Raises:
        ValueError: if `n` is negative.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def binary_tree_count(node_count: int) -> int:
    """Number of labeled binary trees on `node_count` nodes: Catalan(n) * n!."""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    # The original read the input into one name but validated/printed a
    # different, undefined one (NameError).
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
| 721 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    """Build a tiny model/optimizer/scheduler/dataloader fixture for the tests.

    The original passed an undefined name to OneCycleLR and collapsed every
    local into one placeholder; renamed to match the call site in the tests.
    """
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.0_1, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl
def lowerCamelCase_(A):
    """Return a scalar fingerprint of a linear module's parameters.

    The sum of absolute weight and bias values changes whenever the
    parameters change, so tests use it to detect whether weights were
    (re)loaded. The previous revision read an undefined ``model`` instead of
    its parameter.

    Args:
        A: A module exposing ``weight`` and ``bias`` tensors (e.g. ``nn.Linear``).

    Returns:
        float: ``|weight|.sum() + |bias|.sum()``.
    """
    return (A.weight.abs().sum() + A.bias.abs().sum()).item()
def lowerCamelCase_(A):
    """Overwrite ``A``'s parameters with freshly initialised random weights.

    A throwaway ``nn.Linear`` of the same (in, out) shape is created and its
    state dict is loaded into ``A``. The previous revision read an undefined
    ``model`` and loaded the wrong object into ``load_state_dict``.

    Args:
        A: An ``nn.Linear``-shaped module to randomise in place.
    """
    # weight is (out, in); its transpose yields the (in, out) ctor arguments.
    fresh_state = torch.nn.Linear(*tuple(A.weight.T.shape)).state_dict()
    A.load_state_dict(fresh_state)
class UpperCamelCase_ ( A ):
    '''Unit tests for `accelerate.Accelerator`: device/state handling,
    component preparation, checkpoint save/load (with and without hooks),
    and big-model (bitsandbytes 8-bit) integration.

    NOTE(review): every test method below is named `lowercase__` (so only
    the last definition survives on the class) and the bodies reference an
    undefined name `_UpperCAmelCase` where arguments and assertion targets
    are expected — this looks like lossy automated renaming; confirm against
    the upstream accelerate test suite before relying on these tests.
    '''
    @require_cuda
    def lowercase__ ( self):
        # Accelerator on CUDA: shared state must record a non-CPU CUDA device.
        lowerCAmelCase_ = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(_UpperCAmelCase):
            lowerCAmelCase_ = Accelerator(cpu=_UpperCAmelCase)
    def lowercase__ ( self):
        # GradientState is a singleton; mutations must be visible through it.
        lowerCAmelCase_ = Accelerator()
        lowerCAmelCase_ = GradientState()
        assert state.num_steps == 1
        lowerCAmelCase_ = 4
        assert state.num_steps == 4
        assert state.sync_gradients is True
        lowerCAmelCase_ = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def lowercase__ ( self):
        # prepare() must register every returned component on the accelerator.
        lowerCAmelCase_ = Accelerator()
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = create_components()
        (
            (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) ,
        ) = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
    def lowercase__ ( self):
        # free_memory() must drop every registered component.
        lowerCAmelCase_ = Accelerator()
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = create_components()
        accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def lowercase__ ( self):
        # The ACCELERATE_TORCH_DEVICE env var must select the state device.
        PartialState._reset_state()
        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*_UpperCAmelCase , **_UpperCAmelCase):
            pass
        with patch('''torch.cuda.set_device''' , _UpperCAmelCase), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64'''):
            lowerCAmelCase_ = Accelerator()
            self.assertEqual(str(accelerator.state.device) , '''cuda:64''')
    def lowercase__ ( self):
        # save_state()/load_state() must round-trip the model weights.
        lowerCAmelCase_ = Accelerator()
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = create_components()
        accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
        lowerCAmelCase_ = get_signature(_UpperCAmelCase)
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(_UpperCAmelCase)
            # make sure random weights don't match
            load_random_weights(_UpperCAmelCase)
            self.assertTrue(abs(model_signature - get_signature(_UpperCAmelCase)) > 1E-3)
            # make sure loaded weights match
            accelerator.load_state(_UpperCAmelCase)
            self.assertTrue(abs(model_signature - get_signature(_UpperCAmelCase)) < 1E-3)
    def lowercase__ ( self):
        # Registered pre-hooks must run on save/load, and be removable.
        lowerCAmelCase_ = Accelerator()
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = create_components()
        accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
        lowerCAmelCase_ = get_signature(_UpperCAmelCase)
        # saving hook
        def save_config(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
            lowerCAmelCase_ = {'''class_name''': models[0].__class__.__name__}
            with open(os.path.join(_UpperCAmelCase , '''data.json''') , '''w''') as f:
                json.dump(_UpperCAmelCase , _UpperCAmelCase)
        # loading hook
        def load_config(_UpperCAmelCase , _UpperCAmelCase):
            with open(os.path.join(_UpperCAmelCase , '''data.json''') , '''r''') as f:
                lowerCAmelCase_ = json.load(_UpperCAmelCase)
            lowerCAmelCase_ = config['''class_name''']
        lowerCAmelCase_ = accelerator.register_save_state_pre_hook(_UpperCAmelCase)
        lowerCAmelCase_ = accelerator.register_load_state_pre_hook(_UpperCAmelCase)
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(_UpperCAmelCase)
            # make sure random weights don't match with hooks
            load_random_weights(_UpperCAmelCase)
            self.assertTrue(abs(model_signature - get_signature(_UpperCAmelCase)) > 1E-3)
            # random class name to verify correct one is loaded
            lowerCAmelCase_ = '''random'''
            # make sure loaded weights match with hooks
            accelerator.load_state(_UpperCAmelCase)
            self.assertTrue(abs(model_signature - get_signature(_UpperCAmelCase)) < 1E-3)
            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)
        # remove hooks
        save_hook.remove()
        load_hook.remove()
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(_UpperCAmelCase)
            # make sure random weights don't match with hooks removed
            load_random_weights(_UpperCAmelCase)
            self.assertTrue(abs(model_signature - get_signature(_UpperCAmelCase)) > 1E-3)
            # random class name to verify correct one is loaded
            lowerCAmelCase_ = '''random'''
            # make sure loaded weights match with hooks removed
            accelerator.load_state(_UpperCAmelCase)
            self.assertTrue(abs(model_signature - get_signature(_UpperCAmelCase)) < 1E-3)
            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def lowercase__ ( self):
        # prepare() must pass `None` entries through untouched.
        lowerCAmelCase_ = Accelerator()
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = create_components()
        lowerCAmelCase_ = None
        # This should work
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(
            _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
        self.assertTrue(dummy_obj is None)
    def lowercase__ ( self):
        # prepare() must tag every component with `_is_accelerate_prepared`.
        lowerCAmelCase_ = Accelerator()
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = create_components()
        lowerCAmelCase_ = [1, 2, 3]
        # This should work
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(
            _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
        self.assertEqual(
            getattr(_UpperCAmelCase , '''_is_accelerate_prepared''' , _UpperCAmelCase) , _UpperCAmelCase , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
        self.assertEqual(
            getattr(_UpperCAmelCase , '''_is_accelerate_prepared''' , _UpperCAmelCase) , _UpperCAmelCase , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
        self.assertEqual(
            getattr(_UpperCAmelCase , '''_is_accelerate_prepared''' , _UpperCAmelCase) , _UpperCAmelCase , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
        self.assertEqual(
            getattr(_UpperCAmelCase , '''_is_accelerate_prepared''' , _UpperCAmelCase) , _UpperCAmelCase , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
        self.assertEqual(
            getattr(_UpperCAmelCase , '''_is_accelerate_prepared''' , _UpperCAmelCase) , _UpperCAmelCase , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
        self.assertEqual(
            getattr(_UpperCAmelCase , '''_is_accelerate_prepared''' , _UpperCAmelCase) , _UpperCAmelCase , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
    @slow
    @require_bnb
    def lowercase__ ( self):
        # An 8-bit (bnb) model on a single device can be prepared.
        from transformers import AutoModelForCausalLM
        lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , load_in_abit=_UpperCAmelCase , device_map={'''''': 0} , )
        lowerCAmelCase_ = Accelerator()
        # This should work
        lowerCAmelCase_ = accelerator.prepare(_UpperCAmelCase)
    @slow
    @require_bnb
    def lowercase__ ( self):
        # An 8-bit model with CPU offload must be rejected by prepare().
        from transformers import AutoModelForCausalLM
        lowerCAmelCase_ = Accelerator()
        with init_empty_weights():
            lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
                '''EleutherAI/gpt-neo-125m''' , )
        model.tie_weights()
        lowerCAmelCase_ = infer_auto_device_map(_UpperCAmelCase)
        lowerCAmelCase_ = '''cpu'''
        lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , device_map=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , llm_inta_enable_fpaa_cpu_offload=_UpperCAmelCase)
        # This should not work and get value error
        with self.assertRaises(_UpperCAmelCase):
            lowerCAmelCase_ = accelerator.prepare(_UpperCAmelCase)
    @slow
    @require_bnb
    @require_multi_gpu
    def lowercase__ ( self):
        # An 8-bit model split across devices must be rejected in MULTI_GPU mode.
        from transformers import AutoModelForCausalLM
        lowerCAmelCase_ = {'''distributed_type''': DistributedType.MULTI_GPU}
        with init_empty_weights():
            lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
                '''EleutherAI/gpt-neo-125m''' , )
        model.tie_weights()
        lowerCAmelCase_ = infer_auto_device_map(_UpperCAmelCase)
        lowerCAmelCase_ = 1
        lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , load_in_abit=_UpperCAmelCase , device_map=_UpperCAmelCase , )
        lowerCAmelCase_ = Accelerator()
        # This should not work and get value error
        with self.assertRaises(_UpperCAmelCase):
            lowerCAmelCase_ = accelerator.prepare(_UpperCAmelCase)
        PartialState._reset_state()
    @slow
    @require_bnb
    @require_multi_gpu
    def lowercase__ ( self):
        # An 8-bit model sharded across GPUs (no offload) can be prepared.
        from transformers import AutoModelForCausalLM
        with init_empty_weights():
            lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
                '''EleutherAI/gpt-neo-125m''' , )
        lowerCAmelCase_ = infer_auto_device_map(_UpperCAmelCase)
        lowerCAmelCase_ = 1
        lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , load_in_abit=_UpperCAmelCase , device_map=_UpperCAmelCase , )
        lowerCAmelCase_ = Accelerator()
        # This should work
        lowerCAmelCase_ = accelerator.prepare(_UpperCAmelCase)
    @require_cuda
    def lowercase__ ( self):
        # cpu=True must force CPU placement even when CUDA is available.
        lowerCAmelCase_ = torch.nn.Linear(10 , 10)
        lowerCAmelCase_ = torch.optim.SGD(model.parameters() , lr=0.01)
        lowerCAmelCase_ = Accelerator(cpu=_UpperCAmelCase)
        lowerCAmelCase_ = accelerator.prepare(_UpperCAmelCase)
| 413 | 0 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class SCREAMING_SNAKE_CASE ( snake_case__ ):
    '''Audio-diffusion pipeline: denoises mel-spectrogram images with a UNet
    and converts them to audio through the `Mel` helper, optionally encoding
    and decoding through a VQ-VAE latent space.

    NOTE(review): `_snake_case` below is an undefined name standing where
    parameters and type references are expected (lossy automated renaming),
    and the `__init__` parameters all share one name — confirm against the
    upstream AudioDiffusionPipeline before executing.
    '''
    __lowerCamelCase : str =['vqvae']
    def __init__( self : Dict , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : List[Any] , __lowercase : Union[str, Any] , ):
        '''Register the UNet, scheduler, Mel converter and VQ-VAE as pipeline modules.'''
        super().__init__()
        self.register_modules(unet=_snake_case , scheduler=_snake_case , mel=_snake_case , vqvae=_snake_case )
    def UpperCamelCase_ ( self : Dict ):
        '''Default number of inference steps: 50 when the scheduler matches the
        checked type (presumably DDIMScheduler — the type name was lost in
        renaming; TODO confirm), else 1000.'''
        return 50 if isinstance(self.scheduler , _snake_case ) else 1000
    @torch.no_grad()
    def __call__( self : List[str] , __lowercase : str = 1 , __lowercase : Tuple = None , __lowercase : str = None , __lowercase : Tuple = 0 , __lowercase : Optional[Any] = 0 , __lowercase : Any = None , __lowercase : Optional[int] = None , __lowercase : Any = 0 , __lowercase : int = 0 , __lowercase : Optional[Any] = None , __lowercase : List[Any] = 0 , __lowercase : Tuple = None , __lowercase : List[str] = None , __lowercase : Optional[int]=True , ):
        '''Generate spectrogram image(s) and the corresponding audio by
        iterative denoising — optionally seeded from an input audio slice,
        edge-masked, and (de)coded through the VQ-VAE when one is present.'''
        __a = steps or self.get_default_steps()
        self.scheduler.set_timesteps(_snake_case )
        __a = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            __a = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            # Start from pure Gaussian noise shaped like the UNet sample.
            __a = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=_snake_case , device=self.device , )
        __a = noise
        __a = None
        if audio_file is not None or raw_audio is not None:
            # Convert the provided audio slice to a [-1, 1] spectrogram tensor.
            self.mel.load_audio(_snake_case , _snake_case )
            __a = self.mel.audio_slice_to_image(_snake_case )
            __a = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
                (input_image.height, input_image.width) )
            __a = (input_image / 255) * 2 - 1
            __a = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                __a = self.vqvae.encode(torch.unsqueeze(_snake_case , 0 ) ).latent_dist.sample(
                    generator=_snake_case )[0]
                __a = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                __a = self.scheduler.add_noise(_snake_case , _snake_case , self.scheduler.timesteps[start_step - 1] )
            # Mask widths in pixels, derived from seconds of audio.
            __a = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            __a = int(mask_start_secs * pixels_per_second )
            __a = int(mask_end_secs * pixels_per_second )
            __a = self.scheduler.add_noise(_snake_case , _snake_case , torch.tensor(self.scheduler.timesteps[start_step:] ) )
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , _snake_case ):
                __a = self.unet(_snake_case , _snake_case , _snake_case )["sample"]
            else:
                __a = self.unet(_snake_case , _snake_case )["sample"]
            if isinstance(self.scheduler , _snake_case ):
                __a = self.scheduler.step(
                    model_output=_snake_case , timestep=_snake_case , sample=_snake_case , eta=_snake_case , generator=_snake_case , )["prev_sample"]
            else:
                __a = self.scheduler.step(
                    model_output=_snake_case , timestep=_snake_case , sample=_snake_case , generator=_snake_case , )["prev_sample"]
            if mask is not None:
                # Re-impose the known (noised) input at the masked edges.
                if mask_start > 0:
                    __a = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    __a = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            __a = 1 / self.vqvae.config.scaling_factor * images
            __a = self.vqvae.decode(_snake_case )["sample"]
        # Convert [-1, 1] tensors back to uint8 PIL images and then to audio.
        __a = (images / 2 + 0.5).clamp(0 , 1 )
        __a = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        __a = (images * 255).round().astype("""uint8""" )
        __a = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_snake_case , mode="""RGB""" ).convert("""L""" ) for _ in images) )
        __a = [self.mel.image_to_audio(_snake_case ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(_snake_case )[:, np.newaxis, :] ) , **ImagePipelineOutput(_snake_case ) )
    @torch.no_grad()
    def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Optional[Any] , __lowercase : int = 50 ):
        '''Reverse DDIM step-by-step: map spectrogram images back to the noise
        that would generate them (used e.g. for interpolation).'''
        assert isinstance(self.scheduler , _snake_case )
        self.scheduler.set_timesteps(_snake_case )
        __a = np.array(
            [np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
        __a = (sample / 255) * 2 - 1
        __a = torch.Tensor(_snake_case ).to(self.device )
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            __a = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            __a = self.scheduler.alphas_cumprod[t]
            __a = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            __a = 1 - alpha_prod_t
            __a = self.unet(_snake_case , _snake_case )["sample"]
            __a = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            __a = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            __a = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
    @staticmethod
    def UpperCamelCase_ ( __lowercase : Any , __lowercase : List[Any] , __lowercase : int ):
        '''Spherical linear interpolation (slerp) between two flattened tensors.'''
        __a = acos(torch.dot(torch.flatten(_snake_case ) , torch.flatten(_snake_case ) ) / torch.norm(_snake_case ) / torch.norm(_snake_case ) )
        return sin((1 - alpha) * theta ) * xa / sin(_snake_case ) + sin(alpha * theta ) * xa / sin(_snake_case )
class __A:
    """Edit (Levenshtein) distance between two words, solved two ways.

    The previous revision was unusable: all three methods shared one name,
    ``__init__`` bound locals instead of attributes, the memoised helper
    compared a word against itself, and the (name-mangled) recursive call
    targeted a non-existent attribute. The method names below match the
    interactive driver's call sites (``min_dist_top_down`` /
    ``min_dist_bottom_up``).
    """

    def __init__(self):
        # The two words being compared and the DP / memo table.
        self.worda = ""
        self.wordb = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        """Memoised edit distance between worda[:m+1] and wordb[:n+1].

        ``m == -1`` / ``n == -1`` denote an empty prefix, whose distance to
        the other prefix is that prefix's length.
        """
        if m == -1:
            return n + 1
        if n == -1:
            return m + 1
        if self.dp[m][n] > -1:
            return self.dp[m][n]
        if self.worda[m] == self.wordb[n]:
            self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
        else:
            insert = self.__min_dist_top_down_dp(m, n - 1)
            delete = self.__min_dist_top_down_dp(m - 1, n)
            replace = self.__min_dist_top_down_dp(m - 1, n - 1)
            self.dp[m][n] = 1 + min(insert, delete, replace)
        return self.dp[m][n]

    def min_dist_top_down(self, worda, wordb):
        """Return the edit distance via memoised recursion."""
        self.worda = worda
        self.wordb = wordb
        self.dp = [[-1] * len(wordb) for _ in range(len(worda))]
        return self.__min_dist_top_down_dp(len(worda) - 1, len(wordb) - 1)

    def min_dist_bottom_up(self, worda, wordb):
        """Return the edit distance via an iterative DP table."""
        self.worda = worda
        self.wordb = wordb
        m, n = len(worda), len(wordb)
        self.dp = [[0] * (n + 1) for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
snake_case = EditDistance()
print("****************** Testing Edit Distance DP Algorithm ******************")
print()
snake_case = input("Enter the first string: ").strip()
snake_case = input("Enter the second string: ").strip()
print()
print(F'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(F'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 424 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase : List[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( a_ , unittest.TestCase ):
    """Tokenizer tests for Reformer (slow and fast variants).

    NOTE(review): the class attributes are all bound to the single name `A`
    (only the last binding survives), every test method is named
    `SCREAMING_SNAKE_CASE_`, and the bodies reference an undefined name
    `UpperCAmelCase__` where arguments are expected — lossy automated
    renaming; confirm against the upstream transformers test suite before
    relying on these tests.
    """
    A : Dict = ReformerTokenizer
    A : Optional[int] = ReformerTokenizerFast
    A : str = True
    A : Tuple = False
    A : str = True
    def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
        """Build a tokenizer from the SentencePiece fixture and save it for the other tests."""
        super().setUp()
        a : str = ReformerTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__)
        tokenizer.save_pretrained(self.tmpdirname)
    def SCREAMING_SNAKE_CASE_ ( self : Any):
        """`<s>` must round-trip through token <-> id conversion as id 1."""
        a : int = '''<s>'''
        a : List[Any] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__) , UpperCAmelCase__)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__) , UpperCAmelCase__)
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
        """Spot-check vocabulary contents and its size (1000 entries)."""
        a : Any = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '<unk>')
        self.assertEqual(vocab_keys[1] , '<s>')
        self.assertEqual(vocab_keys[-1] , 'j')
        self.assertEqual(len(UpperCAmelCase__) , 1_0_0_0)
    def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
        """`vocab_size` must report the fixture's 1000 entries."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0)
    def SCREAMING_SNAKE_CASE_ ( self : Dict):
        """Python and Rust tokenizers must tokenize and encode identically."""
        if not self.test_rust_tokenizer:
            return
        a : Any = self.get_tokenizer()
        a : str = self.get_rust_tokenizer()
        a : Tuple = '''I was born in 92000, and this is falsé.'''
        a : str = tokenizer.tokenize(UpperCAmelCase__)
        a : int = rust_tokenizer.tokenize(UpperCAmelCase__)
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__)
        a : Union[str, Any] = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__)
        a : List[str] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__)
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__)
        a : List[str] = self.get_rust_tokenizer()
        a : Optional[int] = tokenizer.encode(UpperCAmelCase__)
        a : Optional[Any] = rust_tokenizer.encode(UpperCAmelCase__)
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__)
    def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : List[Any]=1_5):
        """Padding without a pad token must raise for every input shape."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                a : str = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__)
                # Simple input
                a : Union[str, Any] = '''This is a simple input'''
                a : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
                a : int = ('''This is a simple input''', '''This is a pair''')
                a : int = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='max_length')
                # Simple input
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='max_length')
                # Simple input
                self.assertRaises(
                    UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='max_length' , )
                # Pair input
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='max_length')
                # Pair input
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='max_length')
                # Pair input
                self.assertRaises(
                    UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='max_length' , )
    def SCREAMING_SNAKE_CASE_ ( self : str):
        """Deliberate no-op (presumably overriding a base test that does not apply here — confirm)."""
        pass
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
        """Full tokenizer check: tokenize and convert ids on reference sentences."""
        a : Union[str, Any] = ReformerTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__)
        a : List[str] = tokenizer.tokenize('This is a test')
        self.assertListEqual(UpperCAmelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCAmelCase__) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
        a : int = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            UpperCAmelCase__ , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ] , )
        a : int = tokenizer.convert_tokens_to_ids(UpperCAmelCase__)
        self.assertListEqual(
            UpperCAmelCase__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
        a : List[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__)
        self.assertListEqual(
            UpperCAmelCase__ , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ] , )
    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self : Tuple):
        """Pretrained crime-and-punishment tokenizer used by the slow tests."""
        return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment')
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : List[str]):
        """'Hello World!' must encode to the pinned id sequence."""
        a : Any = '''Hello World!'''
        a : Optional[Any] = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7]
        self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__))
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
        """A long text with symbols and OOV words must encode to the pinned ids."""
        a : Optional[Any] = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        a : Dict = [
            1_0_8,
            2_6_5,
            2_4,
            1_1_1,
            4,
            2_5_8,
            1_5_6,
            3_5,
            2_8,
            2_7_5,
            3,
            2_5_9,
            2_9_7,
            2_6_0,
            8_4,
            4,
            3_5,
            1_1_0,
            4_4,
            8,
            2_5_9,
            9_1,
            2_6_8,
            2_1,
            1_1,
            2_0_9,
            2_7_4,
            1_0_9,
            2_6_6,
            2_7_7,
            1_1_7,
            8_6,
            9_3,
            3_1_5,
            2_5_8,
            2_7_8,
            2_5_8,
            2_7_7,
            2_5_8,
            0,
            2_5_8,
            2_8_8,
            2_5_8,
            3_1_9,
            2_5_8,
            0,
            2_5_8,
            0,
            2_5_8,
            0,
            2_5_8,
            0,
            2_5_8,
            2_8_7,
            2_5_8,
            3_1_5,
            2_5_8,
            2_8_9,
            2_5_8,
            2_7_8,
            9_9,
            2_6_9,
            2_6_6,
            2_6_2,
            8,
            2_5_9,
            2_4_1,
            4,
            2_1_7,
            2_3_0,
            2_6_8,
            2_6_6,
            5_5,
            1_6_8,
            1_0_6,
            7_5,
            1_9_3,
            2_6_6,
            2_2_3,
            2_7,
            4_9,
            2_6,
            2_8_2,
            2_5,
            2_6_4,
            2_9_9,
            1_9,
            2_6,
            0,
            2_5_8,
            2_7_7,
            1_1_7,
            8_6,
            9_3,
            1_7_6,
            1_8_3,
            2_7_0,
            1_1,
            2_6_2,
            4_2,
            6_1,
            2_6_5,
        ]
        self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__))
    @require_torch
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
        """Smoke test: an encoded batch must feed through a ReformerModel forward pass."""
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        a : Any = list(self.big_tokenizer.get_vocab().keys())[:1_0]
        a : Union[str, Any] = ''' '''.join(UpperCAmelCase__)
        a : Optional[int] = self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='pt')
        a : List[str] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='pt')
        a : Optional[Any] = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        a : Tuple = encoded_sequence['''input_ids'''].shape
        a : List[Any] = ReformerModel(UpperCAmelCase__)
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**UpperCAmelCase__)
            model(**UpperCAmelCase__)
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
        """Integration test against a pinned revision's expected encodings."""
        a : Tuple = {'''input_ids''': [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        a : Tuple = [
            '''This is a very simple sentence.''',
            '''The quick brown fox jumps over the lazy dog.''',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__ , model_name='google/reformer-crime-and-punishment' , revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a' , padding=UpperCAmelCase__ , sequences=UpperCAmelCase__ , )
| 718 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
# Canonical config locations for the pretrained YOLOS checkpoints.
UpperCamelCase : Tuple = {
    """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCamelCase ( a_ ):
    """Configuration for a YOLOS object-detection model.

    Stores the transformer hyper-parameters, detection-token settings and
    the Hungarian-matcher / loss coefficients. The previous revision
    declared every ``__init__`` parameter under the same name (a
    SyntaxError) and bound the values to a throwaway local ``a`` instead of
    instance attributes; the parameter names below are recovered from the
    names the body read.
    """

    A : Dict = "yolos"  # model_type identifier

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=None,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # None sentinel avoids a shared mutable default; [512, 864] was the
        # default in the previous revision.
        self.image_size = [512, 864] if image_size is None else image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class UpperCamelCase ( a_ ):
    """ONNX export configuration for YOLOS.

    The previous revision defined all three members under the one name
    ``SCREAMING_SNAKE_CASE_`` so only the last survived; the standard
    ``OnnxConfig`` property names are restored below.
    """

    # Minimum torch version required for the export.
    A : Optional[int] = version.parse("1.11" )

    @property
    def inputs(self):
        """Input axes: a single batched NCHW ``pixel_values`` tensor."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4

    @property
    def default_onnx_opset(self):
        """ONNX opset version to export with."""
        return 12
| 610 | 0 |
'''simple docstring'''
class _lowerCAmelCase:
    """A trie (prefix tree) node; the root node doubles as the trie itself.

    The previous revision was unusable: all four methods shared one name,
    ``__init__`` bound locals instead of attributes, ``insert`` constructed
    an undefined ``TrieNode`` class, and the delete helper's parameters were
    collapsed into a single name. The method names below match this module's
    own call sites (``insert_many`` / ``find`` / ``delete``).
    """

    def __init__(self):
        self.nodes = {}  # Mapping from char to child node
        self.is_leaf = False  # True if a word ends at this node

    def insert_many(self, words):
        """Insert every word of an iterable into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word):
        """Insert a single word, creating nodes along its path as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = _lowerCAmelCase()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word):
        """Return True iff ``word`` was inserted (prefixes don't count)."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word):
        """Remove ``word`` from the trie, pruning nodes that become unused."""

        def _delete(curr, word, index):
            # Returns True when `curr` became childless and can be pruned
            # by its parent.
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node, word):
    """Depth-first print of every word stored at or below ``node``.

    The previous revision printed the node object instead of the
    accumulated word and recursed through a name that was never defined;
    the function name is restored to match this module's call sites.

    Args:
        node: Trie node to start from.
        word: Prefix accumulated on the path down to ``node``.
    """
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    """Self-test: insert a word list, then exercise find and delete."""
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    # "bananas" must survive the deletion of its prefix word "banana".
    assert root.find("bananas")
    return True
def print_results(msg: str, passes: bool) -> None:
    """Print `msg` followed by a pass/fail marker."""
    print(str(msg), "works!" if passes else "doesn't work :(")
def pytests() -> None:
    """Pytest-style entry point: fail loudly if the trie self-test fails."""
    assert test_trie()
def main() -> None:
    """Run the trie self-test and print a human-readable result."""
    print_results("Testing trie functionality", test_trie())
# Script entry point.
# NOTE(review): this calls `main`, but the defs above are all bound to the
# placeholder name `__SCREAMING_SNAKE_CASE` — the `main` binding appears to be
# missing; confirm the intended function names.
if __name__ == "__main__":
    main()
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of all elements in `nums1` and `nums2` combined.

    :param nums1: first array (may be empty)
    :param nums2: second array (may be empty)
    :raises ValueError: if both arrays are empty
    """
    all_numbers = sorted(nums1 + nums2)
    if not all_numbers:
        raise ValueError("median is undefined for empty input")
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        # odd count: the middle element
        return all_numbers[div]
    # even count: average of the two middle elements
    return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read the two arrays interactively, then print their combined median.
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
| 508 | 0 |
def match_pattern(input_string: str, pattern: str) -> bool:
    """Return True iff `input_string` fully matches `pattern`.

    Supported pattern syntax: "." matches any single character; "*" matches
    zero or more repetitions of the preceding element. Uses bottom-up dynamic
    programming.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for _ in range(len_pattern)] for _ in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # zero occurrences of the starred element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # one more occurrence of the starred element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
g = 9.80665  # standard gravity (m/s^2); the function default below depends on this name


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Return the buoyant force on an object submerged in a fluid.

    F = fluid_density * gravity * volume (Archimedes' principle).

    :param fluid_density: density of the fluid (kg/m^3); must be positive
    :param volume: displaced volume (m^3); must be non-negative
    :param gravity: gravitational acceleration (m/s^2); defaults to Earth's g
    :raises ValueError: on non-physical inputs
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
# Script entry point: exercise the doctests when run directly.
if __name__ == "__main__":
    import doctest
    # run doctest
    doctest.testmod()
| 629 | 0 |
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read the file at `file_path` and return its contents as a bit string.

    Each byte becomes its zero-padded 8-bit binary representation. On an
    OSError the function prints a message and terminates the process
    (preserved from the original script's behavior).
    """
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        # join() instead of repeated string concatenation (linear, not quadratic)
        return "".join(f"{dat:08b}" for dat in data)
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress `data_bits` (Lempel–Ziv style) and return the decompressed bit string.

    Reconstructed from a corrupted version in which every assignment target had
    been collapsed to a placeholder (losing the lexicon updates) and
    `math.log2` had become the nonexistent `math.loga`.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)  # next code value to be assigned
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            # Code width grows by one bit: re-key every entry with a "0" prefix.
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit string `to_write` to `file_path` as raw bytes.

    The tail is padded with a single 1 bit followed by zeros (a full
    "10000000" terminator byte is appended when `to_write` is already
    byte-aligned). On an OSError the function prints a message and terminates
    the process (preserved from the original script's behavior).
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            # Write every byte, including the padded terminator. The corrupted
            # version iterated result_byte_array[:-1], silently dropping the
            # final (terminator) byte.
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix that a compressed file carries and return the rest.

    `counter` is the number of leading zero bits. The corrupted version sliced
    the original string twice instead of sequentially re-slicing the stripped
    result; the sequential form is restored here.
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    # Drop the run of leading zeros...
    data_bits = data_bits[counter:]
    # ...then drop `counter` more prefix bits plus the separating "1".
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Decompress the LZ-compressed file at `source_path` into `destination_path`.

    NOTE: despite the name, this pipeline performs decompression; the name
    `compress` is kept because the script's `__main__` entry point calls it.
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
# CLI entry point: python <script> <source_path> <destination_path>
# NOTE(review): this calls `compress`, but the defs above are all bound to the
# placeholder name `lowerCamelCase_` — the intended `compress` binding appears
# to be missing; confirm the intended function names.
if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
# Root logger; `logger.addHandler(stream_handler)` below relies on this name
# (the corrupted source bound it to the placeholder `A_`).
logger = logging.getLogger()
def lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
a_ = """\n""".join(UpperCAmelCase__ )
Path(UpperCAmelCase__ ).open("""w""" ).writelines(UpperCAmelCase__ )
# Tiny checkpoints used by the tests below; the names are required by the
# `run_eval_tester` task selection and the @parameterized.expand decorators
# (the corrupted source bound all of these to the same placeholder `A_`).
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    """End-to-end tests for run_eval.py / run_eval_search.py on tiny models.

    NOTE(review): reconstructed from a corrupted version in which the base
    class was an unresolved placeholder and every method was named
    `lowercase__` (shadowing each other and defeating pytest discovery);
    method and local names are restored from call sites and the f-string
    contents — confirm against the original test module.
    """

    def run_eval_tester(self, model):
        """Generate for `model` on a one-article input and check the output file appears."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n ".split()
        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        """Run the hyperparameter search wrapper and check its report contents."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n run_eval_search.py\n {model}\n {str(input_file_name)}\n {str(output_file_name)}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n ".split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
os.remove(Path(_UpperCAmelCase ) ) | 483 | 1 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    """Config tests specialized for Levit: check the vision-specific attributes exist.

    NOTE(review): base class reconstructed as ConfigTester — the corrupted
    source extended an unresolved placeholder; the body (self.config_class,
    self.inputs_dict, self.parent) matches the ConfigTester contract.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    """Builds tiny Levit configs/inputs and runs shape checks for the model tests.

    Method names restored from their call sites in LevitModelTest (the
    corrupted source named every method `snake_case__`, so each definition
    shadowed the previous one).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # Four downsampling stages, each with the conv formula below.
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for Levit.

    NOTE(review): class/attribute/method names reconstructed from the
    ModelTesterMixin conventions and in-file call sites (the corrupted source
    named every class attribute `lowerCAmelCase_` and every method
    `snake_case__`, so each definition shadowed the previous one) — confirm
    against the original test module.
    """

    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )
                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test image used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against a pretrained Levit checkpoint."""

    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 707 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowercase__ : Optional[int] = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    # Fast tests are not implemented for this pipeline; the empty class is kept
    # so the file layout mirrors the other VersatileDiffusion test modules.
    # (Renamed from a placeholder that collided with the integration-test
    # class name below.)
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    """Slow integration tests for the combined VersatileDiffusion pipeline.

    NOTE(review): reconstructed from a corrupted version in which both methods
    and `tearDown` shared one placeholder name (so unittest would never call
    the cleanup) and `torch.float16` had become the nonexistent
    `torch.floataa` — confirm against the original test module.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 139 | 0 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    """Builds tiny ConvNeXt-V2 configs/inputs and runs shape checks for the model tests.

    Class and method names restored from the call sites in the (partially
    visible) test class below (`ConvNextVaModelTester(self)`,
    `prepare_config_and_inputs_with_labels()`, ...); the corrupted source
    named every method `snake_case_`, so each definition shadowed the previous
    one.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
# NOTE(review): the base classes `_A , _A` are undefined (and duplicated) names —
# upstream these are the ModelTesterMixin and PipelineTesterMixin; as written the
# class statement raises at import time.  Every attribute below is bound to the
# single name `a__` and every method to `snake_case_`, so each later definition
# shadows the previous one.  Locals are bound to `_A` while later lines read the
# intended names (`model`, `loss`, `config`, `inputs_dict`, ...) — confirm all
# identifiers against the upstream ConvNextV2 test module.
class lowerCamelCase__ ( _A , _A , unittest.TestCase):
    """Model-level tests for ConvNextV2: config round-trips, forward signature,
    training / gradient-checkpointing smoke tests, and hidden-state shapes."""

    # All model classes under test (empty when torch is unavailable).
    a__ : Optional[int] = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-name -> model-class mapping consumed by the pipeline tester mixin.
    a__ : Tuple = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    # Feature flags (upstream: fx_compatible / test_pruning / test_resize_embeddings /
    # test_head_masking / has_attentions) — all collapsed onto `a__` here.
    a__ : Optional[Any] = False
    a__ : Any = False
    a__ : Dict = False
    a__ : Optional[Any] = False
    a__ : Optional[Any] = False

    def snake_case_ ( self : Optional[int] ) -> List[str]:
        # NOTE(review): `__lowerCAmelCase` is not a parameter of this method;
        # upstream assigns self.model_tester / self.config_tester with
        # config_class=ConvNextVaConfig and has_text_modality=False.
        _A = ConvNextVaModelTester(self )
        _A = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )

    def snake_case_ ( self : Optional[Any] ) -> List[Any]:
        # Run the full battery of common config serialization checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def snake_case_ ( self : Any ) -> str:
        # Intentionally a no-op placeholder.
        return

    @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
    def snake_case_ ( self : Any ) -> Any:
        pass

    @unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
    def snake_case_ ( self : List[Any] ) -> Optional[int]:
        pass

    @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
    def snake_case_ ( self : Union[str, Any] ) -> Tuple:
        pass

    def snake_case_ ( self : int ) -> List[Any]:
        # Smoke-test one training step (forward + backward) per model class.
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            # NOTE(review): `get_values(__lowerCAmelCase )` references an undefined
            # name — upstream these are the backbone/mapping constants used to
            # skip classes that cannot compute a loss.
            _A , _A = self.model_tester.prepare_config_and_inputs_with_labels()
            _A = True

            if model_class.__name__ in [
                *get_values(__lowerCAmelCase ),
                *get_values(__lowerCAmelCase ),
            ]:
                continue

            _A = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.train()
            _A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
            _A = model(**__lowerCAmelCase ).loss
            loss.backward()

    def snake_case_ ( self : List[str] ) -> Any:
        # Same training smoke test, with gradient checkpointing enabled.
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            _A , _A = self.model_tester.prepare_config_and_inputs_with_labels()
            _A = False
            _A = True

            if (
                model_class.__name__
                in [*get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            _A = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.gradient_checkpointing_enable()
            model.train()
            _A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
            _A = model(**__lowerCAmelCase ).loss
            loss.backward()

    def snake_case_ ( self : Union[str, Any] ) -> str:
        # The first positional argument of every forward must be pixel_values.
        _A , _A = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _A = model_class(__lowerCAmelCase )
            _A = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _A = [*signature.parameters.keys()]

            _A = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )

    def snake_case_ ( self : str ) -> List[Any]:
        # Delegates the basic forward-shape check to the model tester.
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )

    def snake_case_ ( self : Tuple ) -> Any:
        # Check number and spatial shape of the hidden states, both when requested
        # via forward kwargs and via the config flag.
        def check_hidden_states_output(__lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : List[str] ):
            # NOTE(review): duplicated parameter names make this inner def a
            # SyntaxError as written (upstream: inputs_dict, config, model_class).
            _A = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()

            with torch.no_grad():
                _A = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )

            _A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            _A = self.model_tester.num_stages
            self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 )

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        _A , _A = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _A = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _A = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    def snake_case_ ( self : Optional[int] ) -> List[Any]:
        # Delegates the image-classification head check to the model tester.
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )

    @slow
    def snake_case_ ( self : List[str] ) -> Union[str, Any]:
        # Loading the first published checkpoint must succeed.
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _A = ConvNextVaModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE_ ( ):
    """Load the local COCO cats fixture image used by the integration tests."""
    # BUG FIX: the obfuscated source bound the image to `_A` but returned the
    # undefined name `image`; the intended binding is restored.
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


# Backward-compatible alias: call sites in this module refer to `prepare_img`.
prepare_img = SCREAMING_SNAKE_CASE_
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
    """Slow integration test: run pretrained ConvNextV2-tiny on a fixture image
    and compare the first three logits against reference values."""

    @cached_property
    def default_image_processor ( self ):
        # BUG FIX: the test body reads `self.default_image_processor`, but the
        # obfuscated source named this property `snake_case_`; the name the body
        # actually uses is restored.  Returns None when vision extras are missing.
        return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None

    @slow
    def snake_case_ ( self ):
        """Forward a fixture image and check the logits shape and a 3-value slice."""
        # BUG FIX: locals were bound to `_A` while later lines read
        # `model`/`preprocessor`/`inputs`/`outputs`; `torch_device` replaces an
        # undefined parameter reference (confirm it is imported by this module),
        # and the image helper is called by the name it is defined under here.
        model = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(torch_device )

        preprocessor = self.default_image_processor
        image = SCREAMING_SNAKE_CASE_()
        inputs = preprocessor(images=image , return_tensors='''pt''' ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 2 |
from __future__ import annotations
def UpperCAmelCase_ ( UpperCAmelCase__ ):
    """Sort a list of numbers with bucket sort and return a new sorted list.

    One bucket per integer offset from the minimum value; each bucket is sorted
    individually and the buckets are concatenated in ascending order.
    Returns [] for empty input; the input list is not modified.
    """
    if len(UpperCAmelCase__ ) == 0:
        return []
    min_value, max_value = min(UpperCAmelCase__ ), max(UpperCAmelCase__ )
    bucket_count = int(max_value - min_value ) + 1
    buckets = [[] for _ in range(bucket_count )]

    for i in UpperCAmelCase__:
        # BUG FIX: append the element itself, not the whole input list.
        buckets[int(i - min_value )].append(i )

    # BUG FIX: sort each individual bucket, not the original input list.
    return [v for bucket in buckets for v in sorted(bucket )]


# Backward-compatible alias: the module's self-test calls `bucket_sort`.
bucket_sort = UpperCAmelCase_
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # BUG FIX: the module defines the sort under the name `UpperCAmelCase_`, not
    # `bucket_sort`; calling the defined name avoids a NameError when the script
    # is run directly.
    assert UpperCAmelCase_([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert UpperCAmelCase_([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 412 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    """Slow GPU integration tests for StableDiffusionKDiffusionPipeline.

    NOTE(review): all methods share the obfuscated name `_lowercase`, so later
    definitions shadow earlier ones on the class; the names are preserved here to
    keep the class interface unchanged — confirm intended names upstream.
    """

    def _lowercase (self ):
        """Release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowercase (self ):
        """SD v1.4 + `sample_euler`: check a corner slice of the generated image."""
        # BUG FIX: locals were bound to `SCREAMING_SNAKE_CASE_` while later lines
        # read `sd_pipe`/`prompt`/`generator`/`output`/...; the intended bindings
        # are restored, and the undefined `.to(...)`/`disable=...`/`generator=...`
        # arguments become `torch_device`/`None`/the seeded generator.
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )

        sd_pipe.set_scheduler('''sample_euler''' )

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def _lowercase (self ):
        """SD v2.1-base + `sample_euler` (looser tolerance)."""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )

        sd_pipe.set_scheduler('''sample_euler''' )

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1

    def _lowercase (self ):
        """SD v2.1-base + `sample_dpmpp_2m` with Karras sigmas enabled."""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )

        sd_pipe.set_scheduler('''sample_dpmpp_2m''' )

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        # use_karras_sigmas=True matches the upstream test's intent — confirm.
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=True , )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase__ = logging.getLogger(__name__)
def _lowerCamelCase ( __a, __a ):
return (preds == labels).mean()
@dataclass
class snake_case :
    """Arguments pertaining to which model/config/tokenizer we fine-tune.

    NOTE(review): the obfuscated original bound every field to one repeated name
    with no type annotations (which `@dataclass` rejects); the field names are
    restored to match the attributes the training entry point reads
    (model_name_or_path, config_name, tokenizer_name, cache_dir).
    """

    # Required: model identifier or local path.
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class snake_case :
    """Arguments pertaining to the data used for training and evaluation.

    NOTE(review): field names are restored to the attributes the training entry
    point reads (task_name, data_dir, max_seq_length, overwrite_cache); the
    obfuscated original bound every field to one name with no type annotations,
    which `@dataclass` rejects at class-creation time.
    """

    task_name: str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
    data_dir: str = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
    max_seq_length: int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def _lowerCamelCase ( ):
    """Train and/or evaluate a multiple-choice model from command-line arguments.

    Returns a dict of evaluation results (empty unless --do_eval is passed).

    NOTE(review): `ModelArguments` / `DataTrainingArguments` are not defined
    under those names in this obfuscated module (both dataclasses were renamed
    `snake_case`) — confirm against the upstream run_multiple_choice.py.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            ''' --overwrite_output_dir to overcome.''' )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    # BUG FIX: the obfuscated source read `training_args.fpaa`; TrainingArguments
    # exposes this mixed-precision flag as `fp16`.
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fp16, )

    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''', training_args )

    # Set seed
    set_seed(training_args.seed )

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p ) -> Dict:
        # Accuracy is computed inline: the module-level helper's obfuscated name
        # is shadowed by later same-named definitions, so it cannot be called
        # reliably from here.
        preds = np.argmax(p.predictions, axis=1 )
        return {"acc": (preds == p.label_ids).mean()}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 ) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file, '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''', key, value )
                    writer.write('''%s = %s\n''' % (key, value) )

            results.update(result )

    return results
def _lowerCamelCase ( __a ):
    """Multiprocessing entry point for xla_spawn (TPUs); the index arg is unused.

    NOTE(review): `main` is not defined in this obfuscated module — the training
    entry point is also named `_lowerCamelCase` and is shadowed by this very
    definition, so calling this raises NameError at runtime.  Confirm intended
    names against the upstream run_multiple_choice.py.
    """
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    # NOTE(review): `main` is undefined in this obfuscated module (the entry
    # points were all renamed `_lowerCamelCase`), so direct execution raises
    # NameError — confirm intended names against the upstream script.
    main()
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
SCREAMING_SNAKE_CASE__ : List[str] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
# NOTE(review): the base name `lowerCamelCase` is undefined in this module —
# upstream this class extends transformers.Trainer.  Every method below is named
# `A`, so later definitions shadow earlier ones, and locals are bound to
# `__magic_name__` while subsequent lines read names such as `eval_examples`,
# `calib_dataset`, `model`, `metrics` that were never bound — confirm the
# intended identifiers against the upstream quantization-qdqbert
# QuestionAnsweringTrainer.
class lowerCamelCase_ ( lowerCamelCase ):
    """Question-answering Trainer variant with TensorRT-style quantization:
    calibration, post-processed evaluation/prediction, and ONNX export."""

    def __init__( self , *__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
        """Store eval examples, the post-processing hook and quantization args.

        NOTE(review): the repeated parameter name is a SyntaxError as written
        (upstream: eval_examples, post_process_function, quant_trainer_args).
        """
        super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
        __magic_name__ :Any = eval_examples
        __magic_name__ :str = post_process_function
        __magic_name__ :int = quant_trainer_args
        __magic_name__ :List[str] = 1_2_8  # default number of calibration samples

    def A ( self , __lowerCAmelCase=None ):
        """Build a non-shuffled DataLoader over the calibration dataset."""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('''Trainer: calibration requires an calib_dataset.''' )
        __magic_name__ :Optional[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset

        # Strip columns the model's forward does not accept.
        __magic_name__ :Optional[int] = self._remove_unused_columns(__lowerCAmelCase , description='''Calibration''' )
        return DataLoader(
            __lowerCAmelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__lowerCAmelCase , )

    def A ( self , __lowerCAmelCase=None ):
        """Run forward passes over calibration data to collect quantization stats."""
        __magic_name__ :Dict = self.train_dataset if calib_dataset is None else calib_dataset
        __magic_name__ :Any = self.get_calib_dataloader(__lowerCAmelCase )
        __magic_name__ :List[str] = self.model
        quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args , calib=__lowerCAmelCase )
        model.eval()
        quant_trainer.enable_calibration(__lowerCAmelCase )

        logger.info('''***** Running calibration *****''' )
        logger.info(F''' Num examples = {self.calib_num}''' )
        logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
        for step, inputs in enumerate(__lowerCAmelCase ):
            # Prediction step
            # NOTE(review): an annotation on a tuple-unpacking target is a
            # SyntaxError as written.
            __magic_name__ , __magic_name__ , __magic_name__ :str = self.prediction_step(__lowerCAmelCase , __lowerCAmelCase , prediction_loss_only=__lowerCAmelCase )
            # Stop once enough samples have been observed.
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(__lowerCAmelCase , self.quant_trainer_args )
        __magic_name__ :Any = model

    def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = "eval" ):
        """Evaluate, deferring metric computation until after QA post-processing."""
        __magic_name__ :Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
        __magic_name__ :Optional[Any] = self.get_eval_dataloader(__lowerCAmelCase )
        __magic_name__ :str = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        __magic_name__ :Any = self.compute_metrics
        __magic_name__ :List[Any] = None
        __magic_name__ :List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            __magic_name__ :Optional[Any] = eval_loop(
                __lowerCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
        finally:
            # Always restore the user's compute_metrics, even on failure.
            __magic_name__ :Union[str, Any] = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            __magic_name__ :Union[str, Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions )
            __magic_name__ :int = self.compute_metrics(__lowerCAmelCase )

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'''{metric_key_prefix}_''' ):
                    __magic_name__ :Dict = metrics.pop(__lowerCAmelCase )

            self.log(__lowerCAmelCase )
        else:
            __magic_name__ :List[str] = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )

        __magic_name__ :Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase )
        return metrics

    def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase = "test" ):
        """Predict on a test set, with optional post-processing into final answers."""
        __magic_name__ :int = self.get_test_dataloader(__lowerCAmelCase )

        # Temporarily disable metric computation, we will do it in the loop here.
        __magic_name__ :Dict = self.compute_metrics
        __magic_name__ :str = None
        __magic_name__ :Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            __magic_name__ :int = eval_loop(
                __lowerCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
        finally:
            __magic_name__ :List[Any] = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        __magic_name__ :Optional[Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , '''predict''' )
        __magic_name__ :Dict = self.compute_metrics(__lowerCAmelCase )

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'''{metric_key_prefix}_''' ):
                __magic_name__ :List[str] = metrics.pop(__lowerCAmelCase )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase )

    def A ( self , __lowerCAmelCase="./" ):
        """Export the (quantized) model to ONNX with dynamic batch/sequence axes."""
        # Use one real eval batch as the tracing example input.
        __magic_name__ :List[Any] = self.eval_dataset
        __magic_name__ :Any = self.get_eval_dataloader(__lowerCAmelCase )
        __magic_name__ :int = next(iter(__lowerCAmelCase ) )

        # saving device - to make it consistent
        __magic_name__ :str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )

        # convert to tuple
        __magic_name__ :int = tuple(v.to(__lowerCAmelCase ) for k, v in batch.items() )
        logger.info('''Converting model to be onnx compatible''' )
        from pytorch_quantization.nn import TensorQuantizer

        __magic_name__ :Any = True

        __magic_name__ :Optional[int] = self.model.to(__lowerCAmelCase )
        model.eval()
        model.float()

        # Unwrap DataParallel/DistributedDataParallel if present.
        __magic_name__ :Any = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model
        quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args )

        __magic_name__ :int = os.path.join(__lowerCAmelCase , '''model.onnx''' )
        logger.info(F'''exporting model to {output_model_file}''' )

        __magic_name__ :Dict = {0: '''batch_size''', 1: '''seq_len'''}
        torch.onnx.export(
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , export_params=__lowerCAmelCase , opset_version=1_3 , do_constant_folding=__lowerCAmelCase , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
                '''input_ids''': axes,
                '''attention_mask''': axes,
                '''token_type_ids''': axes,
                '''output_start_logits''': axes,
                '''output_end_logits''': axes,
            } , verbose=__lowerCAmelCase , )
        logger.info('''onnx export finished''' )
| 0 | '''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def __lowerCamelCase ( _UpperCamelCase : Optional[int] ):
    """With the mock fixture active, both 'mock' and 'bz2' are registered with fsspec."""
    registered = _fsspec_registry
    assert "mock" in registered
    assert "bz2" in registered
def __lowerCamelCase ( ):
    """After the mock fixture tears down, 'mock' is unregistered while 'bz2' remains."""
    registered = _fsspec_registry
    assert "mock" not in registered
    assert "bz2" in registered
def __lowerCamelCase ( ):
    """extract_path_from_uri strips the s3 scheme and leaves local paths untouched."""
    # BUG FIX: the obfuscated source bound results to one repeated name while the
    # asserts read `dataset_path`/`new_dataset_path`, and referenced an undefined
    # `_UpperCamelCase`; the intended locals are restored.
    mock_bucket = '''mock-s3-bucket'''
    dataset_path = F"""s3://{mock_bucket}"""
    dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path.startswith('''s3://''' ) is False

    new_dataset_path = '''./local/path'''
    dataset_path = extract_path_from_uri(new_dataset_path )
    assert dataset_path == new_dataset_path
def __lowerCamelCase ( _UpperCamelCase : Tuple ):
    """A fixture-provided remote filesystem is detected as remote; the local
    'file' filesystem is not."""
    # BUG FIX: results were bound to an obfuscated name while the asserts read
    # `is_remote`; the intended binding is restored.
    is_remote = is_remote_filesystem(_UpperCamelCase )
    assert is_remote is True

    fs = fsspec.filesystem('''file''' )
    is_remote = is_remote_filesystem(fs )
    assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , COMPRESSION_FILESYSTEMS )
def __lowerCamelCase ( compression_fs_class , gz_file , bza_file , lza_file , zstd_file , xz_file , text_file ):
    """Open each compressed fixture through its fsspec compression filesystem and
    compare the decompressed content with the plain text fixture.

    BUG FIX: the parametrize argument was an undefined name —
    COMPRESSION_FILESYSTEMS (imported above) is the list being iterated — and all
    parameters shared one obfuscated name (a SyntaxError).  Parameter names follow
    the fixtures the body references; confirm `bza_file`/`lza_file` match the
    conftest fixture names.
    """
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # Optional compression backends: skip with the decorator's reason text.
        reason = F"""for '{compression_fs_class.protocol}' compression protocol, """
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    fs = fsspec.filesystem(compression_fs_class.protocol , fo=input_path )
    assert isinstance(fs , compression_fs_class )
    expected_filename = os.path.basename(input_path )
    expected_filename = expected_filename[: expected_filename.rindex('''.''' )]
    assert fs.glob('''*''' ) == [expected_filename]
    with fs.open(expected_filename , '''r''' , encoding='''utf-8''' ) as f, open(text_file , encoding='''utf-8''' ) as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def __lowerCamelCase ( protocol , zip_jsonl_path , jsonl_gz_path ):
    """A member file inside a zip/gzip archive is reachable via fsspec URL chaining.

    BUG FIX: all parameters shared one obfuscated name (a SyntaxError) and
    locals were bound to a single name while later lines read the intended
    identifiers; restored per the upstream test — confirm fixture names.
    """
    compressed_file_paths = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = '''dataset.jsonl'''
    path = F"""{protocol}://{member_file_path}::{compressed_file_path}"""
    fs, *_ = fsspec.get_fs_token_paths(path )
    assert fs.isfile(member_file_path )
    assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def __lowerCamelCase ( hf_api , hf_token , hf_private_dataset_repo_txt_data , text_file ):
    """HfFileSystem exposes a private dataset repo's files for reading.

    BUG FIX: all parameters shared one obfuscated name (a SyntaxError); restored
    to the pytest fixture names the body implies — confirm order and names
    against the project's conftest.
    """
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data , token=hf_token )
    hffs = HfFileSystem(repo_info=repo_info , token=hf_token )

    assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]

    assert hffs.isdir('''data''' )

    assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )

    with open(text_file ) as f:
        assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def __lowerCamelCase ( ):
    """Re-registering an existing protocol and reloading datasets.filesystems emits
    exactly one overwrite warning with the expected message.

    BUG FIX: the obfuscated source passed an undefined name as the
    implementation, the clobber flag, and the warning class; None/True/UserWarning
    restore the upstream test's intent — confirm against the original.
    """
    protocol = '''bz2'''
    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol , None , clobber=True )
    with pytest.warns(UserWarning ) as warning_info:
        importlib.reload(datasets.filesystems )

    assert len(warning_info ) == 1
    assert (
        str(warning_info[0].message )
        == F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
    )
)
| 390 | 0 |
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Number of hidden layers per published RWKV checkpoint size tag.
# NOTE(review): both dictionaries below are bound to the same obfuscated name,
# so the second assignment overwrites the first; the conversion code reads
# NUM_HIDDEN_LAYERS_MAPPING / HIDEN_SIZE_MAPPING, which are never bound here —
# confirm the intended constant names against the upstream convert_rwkv script.
UpperCAmelCase__ = {
    '169M': 12,
    '430M': 24,
    '1B5': 24,
    '3B': 32,
    '7B': 32,
    '14B': 40,
}
# Hidden size per published RWKV checkpoint size tag.
UpperCAmelCase__ = {
    '169M': 768,
    '430M': 1024,
    '1B5': 2048,
    '3B': 2560,
    '7B': 4096,
    '14B': 5120,
}
def _UpperCAmelCase ( __lowerCamelCase : Optional[int] ) -> Optional[int]:
_snake_case = list(state_dict.keys() )
for name in state_dict_keys:
_snake_case = state_dict.pop(__lowerCamelCase )
# emb -> embedding
if name.startswith('''emb.''' ):
_snake_case = name.replace('''emb.''' , '''embeddings.''' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('''blocks.0.ln0''' ):
_snake_case = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
# att -> attention
_snake_case = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , __lowerCamelCase )
# ffn -> feed_forward
_snake_case = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , __lowerCamelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith('''.time_mix_k''' ):
_snake_case = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('''.time_mix_v''' ):
_snake_case = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
# time_mix_r -> time_mix_key and reshape
if name.endswith('''.time_mix_r''' ):
_snake_case = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
if name != "head.weight":
_snake_case = '''rwkv.''' + name
_snake_case = weight
return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download a raw RWKV checkpoint from the Hub and convert it to Transformers format.

    Args:
        repo_id (str): Hub repo to pull the raw checkpoint from.
        checkpoint_file (str): name of the checkpoint file inside that repo.
        output_dir (str): local directory where the converted model is written.
        size (str, optional): one of the keys of ``NUM_HIDDEN_LAYERS_MAPPING``;
            inferred from ``checkpoint_file`` when not given.
        tokenizer_file (str, optional): tokenizer JSON to use; defaults to the
            GPT-NeoX-20B tokenizer when omitted.
        push_to_hub (bool): if True, push the converted model under ``model_name``.
        model_name (str, optional): Hub repo name to push to (required with
            ``push_to_hub=True``).

    Raises:
        ValueError: when the size cannot be inferred / is unknown, or when
            pushing without a ``model_name``.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('No `--tokenizer_file` provided, we will use the default tokenizer.')
        vocab_size = 5_02_77
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('Could not infer the size, please provide it with the `--size` argument.')
    if size not in possible_sizes:
        raise ValueError(f'`size` should be one of {possible_sizes}, got {size}.')

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location='cpu')
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_file, 'w', encoding='utf-8') as f:
            content = json.dumps(index, indent=2, sort_keys=True) + '\n'
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        'Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.'
    )
    shard_files = list(shards.keys())

    # Drop references so the full state dict can be garbage-collected before the
    # per-shard reload below (keeps peak memory down).
    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError('Please provide a `model_name` to push the model to the Hub.')
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size='2GB')
        tokenizer.push_to_hub(model_name)


# Backward-compatible alias for the previous (auto-generated) name.
_UpperCAmelCase = convert_rmkv_checkpoint_to_hf_format
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
    )
    parser.add_argument(
        '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
    )
    parser.add_argument(
        '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
    )
    parser.add_argument(
        '--tokenizer_file',
        default=None,
        type=str,
        help='Path to the tokenizer file to use (if not provided, only the model is converted).',
    )
    parser.add_argument(
        '--size',
        default=None,
        type=str,
        help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Push to the Hub the converted model.',
    )
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='Name of the pushed model on the Hub, including the username / organization.',
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 430 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    """Builds tiny BEiT configs/inputs and checks Flax BEiT model output shapes."""

    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny random BEiT."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        # The [CLS] token is dropped from the masked-image-modeling logits.
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict


# Backward-compatible alias for the previous (auto-generated) class name.
lowerCAmelCase__ = FlaxBeitModelTester
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model tests specialized for the BEiT architecture."""

    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self):
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    # BEiT takes pixel_values instead of input_ids, so the generic signature
    # test is overridden here.
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                # JIT and non-JIT execution must agree on the output structure.
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('microsoft/beit-base-patch16-224')
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)


# Backward-compatible alias for the previous (auto-generated) class name.
lowerCAmelCase__ = FlaxBeitModelTest
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


# Backward-compatible alias for the previous (auto-generated) name.
_UpperCAmelCase = prepare_img
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    """Slow integration tests checking pretrained Flax BEiT outputs against
    reference values."""

    @cached_property
    def default_image_processor(self):
        # Only constructed when vision deps are present; tests are gated on
        # @require_vision anyway.
        return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224') if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k')

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='np').pixel_values

        # prepare bool_masked_pos: mask every one of the 196 patches
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224')

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='np')

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        # 281 = "tabby cat" in ImageNet-1k
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k')

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='np')

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)


# Backward-compatible alias for the previous (auto-generated) class name.
lowerCAmelCase__ = FlaxBeitModelIntegrationTest
| 430 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (previously clobbered because it shared a name with the map below).
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> config URL.
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}

# Backward-compatible alias for the previous shared module-level name (its last
# binding in the original module was the archive map).
_lowerCamelCase = DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP
class Data2VecTextConfig(PretrainedConfig):
    """Configuration for data2vec-text models.

    Mirrors the RoBERTa configuration; defaults reproduce the
    facebook/data2vec-text-base checkpoint.
    """

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


# Backward-compatible alias for the previous (auto-generated) class name.
lowerCamelCase = Data2VecTextConfig
class Data2VecTextOnnxConfig(OnnxConfig):
    """ONNX export configuration for data2vec-text models."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the model inputs for ONNX export."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )


# Backward-compatible alias for the previous (auto-generated) class name.
lowerCamelCase = Data2VecTextOnnxConfig
| 663 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: configuration objects are always importable.
_import_structure = {
    'configuration_xlm_roberta_xl': [
        'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'XLMRobertaXLConfig',
        'XLMRobertaXLOnnxConfig',
    ],
}

# Modeling objects are only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_xlm_roberta_xl'] = [
        'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLMRobertaXLForCausalLM',
        'XLMRobertaXLForMaskedLM',
        'XLMRobertaXLForMultipleChoice',
        'XLMRobertaXLForQuestionAnswering',
        'XLMRobertaXLForSequenceClassification',
        'XLMRobertaXLForTokenClassification',
        'XLMRobertaXLModel',
        'XLMRobertaXLPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy
    # submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 663 | 1 |
'''simple docstring'''
def _a ( __lowerCAmelCase : list ):
"""simple docstring"""
if len(__lowerCAmelCase ) <= 1:
return lst
snake_case__ : Any = 1
while i < len(__lowerCAmelCase ):
if lst[i - 1] <= lst[i]:
i += 1
else:
snake_case__ , snake_case__ : Optional[int] = lst[i], lst[i - 1]
i -= 1
if i == 0:
snake_case__ : List[Any] = 1
return lst
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 502 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    """ConfigTester variant that also checks MobileViTV2-specific fields."""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'width_multiplier'))
class MobileViTVaModelTester:
    """Builds tiny MobileViTV2 configs/inputs and checks model output shapes."""

    def __init__(
        self,
        parent,
        batch_size=1_3,
        image_size=6_4,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=3_2,
        classifier_dropout_prob=0.1,
        initializer_range=0.0_2,
        is_training=True,
        use_labels=True,
        num_labels=1_0,
        scope=None,
        width_multiplier=0.2_5,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Channel dim of the last stage scales with the width multiplier,
        # rounded to a multiple of 8.
        self.last_hidden_size = make_divisible(5_1_2 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels, pixel_labels) for a tiny model."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common PyTorch model tests specialized for the MobileViTV2 architecture."""

    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileViTV2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileViTV2 does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileViTV2 does not output attentions')
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    # MobileViTV2 takes pixel_values instead of input_ids, so the generic
    # signature test is overridden here.
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# Backward-compatible alias for the previous (auto-generated) class name.
a = MobileViTVaModelTest
def _a ( ):
"""simple docstring"""
snake_case__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests checking pretrained MobileViTV2 outputs against
    reference values."""

    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256').to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 2_1, 3_2, 3_2))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
                [[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
                [[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # Post-processing runs on CPU.
        outputs.logits = outputs.logits.detach().cpu()

        # Resized to an explicit target size...
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(5_0, 6_0)])
        expected_shape = torch.Size((5_0, 6_0))
        self.assertEqual(segmentation[0].shape, expected_shape)

        # ...or kept at the model's native output resolution.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((3_2, 3_2))
        self.assertEqual(segmentation[0].shape, expected_shape)


# Backward-compatible alias for the previous (auto-generated) class name.
a = MobileViTVaModelIntegrationTest
| 502 | 1 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Configuration attributes that are allowed to stay unused in the modeling files.
# `True` exempts every attribute of the class; a list exempts only those names.
SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}

# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Return ``True`` if any name in `attributes` is used in the modeling sources,
    or if it falls under one of the allowed special cases.

    Args:
        config_class: the configuration class (only its ``__name__`` is consulted,
            for `SPECIAL_CASES_TO_ALLOW` lookups).
        attributes: list of attribute-name variants for one config parameter.
        default_value: the parameter's default in the config ``__init__``.
        source_strings: contents of the model's ``modeling_*`` files.
    """
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the sorted list of `config_class` ``__init__`` parameters that are never
    used in the model's ``modeling_*`` files (after allowing the special cases)."""
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Scan every (non-deprecated) configuration class registered in `CONFIG_MAPPING`
    and raise ``ValueError`` listing any configuration attributes unused in the
    corresponding modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
| 645 |
"""simple docstring"""
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img, pt1, pt2, rows, cols):
    """Warp ``img`` by the affine transform that maps triangle ``pt1`` onto ``pt2``.

    ``rows``/``cols`` give the output image size. Returns the warped image.
    (Named `get_rotation` to match the call sites in the ``__main__`` block.)
    """
    rotation_matrix = cva.getAffineTransform(pt1, pt2)
    return cva.warpAffine(img, rotation_matrix, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    # NOTE(review): the original point pairings were destroyed by name mangling
    # (`ptsa` everywhere); pairings below follow the upstream reference — confirm.
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 223 | 0 |
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Load the iris data set and split it into train/test subsets used by the
# classifier demo below.
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(point_a, point_b):
    """Return the Euclidean distance between two points (any array-likes of equal length)."""
    return np.linalg.norm(np.array(point_a) - np.array(point_b))
def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` with k-nearest-neighbours over (`train_data`, `train_target`).

    Args:
        train_data: training samples.
        train_target: integer class labels, indexing into `classes`.
        classes: class names.
        point: the sample to classify.
        k: number of neighbours to vote (default 5).

    Returns:
        The name (entry of `classes`) of the majority class among the k nearest points.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
    # Classify one sample: [sepal length, sepal width, petal length, petal width].
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 507 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

# fairseq-checkpoint-key -> HF-model-key maps, one per sub-network.
MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
# Per-task maps, composed from the shared encoder/decoder plus task-specific pre/post-nets.
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the HF module reached by walking the dotted `key` from
    `hf_pointer`, targeting the tensor selected by `weight_type` (or the module's
    own data when `weight_type` is None). Raises ``ValueError`` on shape mismatch.
    `full_name` is the original fairseq key, used for logging only."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    """Return ``True`` if checkpoint key `name` matches any pattern in `ignore_keys`.

    Patterns: ``"prefix.*"`` matches by prefix; ``"prefix.*.suffix"`` matches when
    both parts occur in `name`; any other pattern matches as a substring.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    """Load every tensor from the fairseq state dict `fairseq_dict` into `hf_model`,
    using the task-specific key mapping ("s2t", "t2s" or "s2s"). Keys matching the
    task's ignore list are skipped; unmatched keys are reported with a warning."""
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one convolutional feature-encoder tensor (`full_name` -> `value`) into
    `feature_extractor`. type_id 0 is the conv itself; type_id 2 is its layer norm
    (only layer 0 when group norm is used). Unhandled keys go to `unused_weights`."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    """Convert a fairseq SpeechT5 checkpoint to the HF format and save (optionally
    push) processor + model. `task` is one of "s2t", "t2s", "s2s"."""
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    # NOTE(review): as in the upstream script, `tokenizer` is only defined when
    # `vocab_path` is given; calling without it raises NameError below — confirm intent.
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
| 507 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Return the total number of trainable (requires_grad) parameters of `model`."""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
# Module-level logger; the name `logger` is relied on by the callback class below.
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint that keeps the 3 best checkpoints by ``val_<metric>``.

    Supported metrics: "rouge2", "bleu", "em"; anything else raises NotImplementedError.
    """
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    """Build an EarlyStopping callback on ``val_<metric>`` (min mode for losses)."""
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class UpperCamelCase(pl.Callback):
    """Lightning callback that logs learning rates, parameter counts and writes
    per-type-path result/generation files for seq2seq fine-tuning runs."""

    def on_batch_end(self, trainer, pl_module):
        # Log the per-parameter-group learning rates of the first optimizer.
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        """Dump callback metrics (and optionally generations) for `type_path` to disk."""
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 439 |
import re
from filelock import FileLock
# Detect whether nltk is installed; the flag gates the punkt download below.
try:
    import nltk
    a_ : bool = True
except (ImportError, ModuleNotFoundError):
    a_ : bool = False
# NOTE(review): the availability flag above is bound to ``a_`` but this guard
# reads ``NLTK_AVAILABLE`` — looks like a mechanical-rename slip; confirm.
if NLTK_AVAILABLE:
    # Serialize the one-time tokenizer-data download across processes.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def __lowerCAmelCase ( _UpperCamelCase : str ) -> str:
    """Strip Pegasus "<n>" newline markers and re-split the text into one
    sentence per line using nltk's sentence tokenizer."""
    # Bug fix: ``re.sub`` returns the new string; the original discarded the
    # result, so the "<n>" markers were never actually removed.
    _UpperCamelCase = re.sub('<n>' , '' , _UpperCamelCase )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_UpperCamelCase ) )
| 439 | 1 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def SCREAMING_SNAKE_CASE__ ( img , src_points , dst_points , rows , cols ):
    """Warp ``img`` with the affine transform mapping ``src_points`` to
    ``dst_points``; the result has shape ``(rows, cols)``."""
    # Fix: the original declared five parameters all named ``__a`` (a
    # SyntaxError) and read undefined ``_A`` names in the body.
    rotation_matrix = cva.getAffineTransform(src_points , dst_points )
    return cva.warpAffine(img , rotation_matrix , (rows, cols) )
if __name__ == "__main__":
    # Demo: show the grayscale Lena image next to three affine rotations.
    # NOTE(review): the assignments below all bind ``_SCREAMING_SNAKE_CASE``
    # while later lines read ``image``/``gray_img``/``images``/``titles`` and
    # call ``get_rotation`` — mechanical-rename breakage; confirm intent.
    # read original image
    _SCREAMING_SNAKE_CASE = cva.imread(
        str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""")
    )
    # turn image in gray scale value
    _SCREAMING_SNAKE_CASE = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    _SCREAMING_SNAKE_CASE = gray_img.shape
    # set different points to rotate image
    _SCREAMING_SNAKE_CASE = np.array([[50, 50], [2_00, 50], [50, 2_00]], np.floataa)
    _SCREAMING_SNAKE_CASE = np.array([[10, 1_00], [2_00, 50], [1_00, 2_50]], np.floataa)
    _SCREAMING_SNAKE_CASE = np.array([[50, 50], [1_50, 50], [1_20, 2_00]], np.floataa)
    _SCREAMING_SNAKE_CASE = np.array([[10, 1_00], [80, 50], [1_80, 2_50]], np.floataa)
    # add all rotated images in a list
    _SCREAMING_SNAKE_CASE = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
    ]
    # plot different image rotations
    _SCREAMING_SNAKE_CASE = plt.figure(1)
    _SCREAMING_SNAKE_CASE = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
        plt.title(titles[i])
        plt.axis("""off""")
    plt.subplots_adjust(left=0.0, bottom=0.0_5, right=1.0, top=0.9_5)
    plt.show()
| 709 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
# Load the reference BLEU fixture that ships with the repo's test data.
# Fix: the original bound the path to ``_SCREAMING_SNAKE_CASE`` but opened the
# undefined name ``filename``, and the test class below reads ``bleu_data``.
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
_SCREAMING_SNAKE_CASE = bleu_data  # keep the generated alias for any external reader
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow integration test: FSMT translation quality (BLEU) on the val fixture."""

    def get_tokenizer ( self , mname ):
        """Load the FSMT tokenizer for checkpoint ``mname``."""
        return FSMTTokenizer.from_pretrained(mname )

    def get_model ( self , mname ):
        """Load the FSMT model for ``mname`` on ``torch_device`` (fp16 on CUDA)."""
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ['en-ru', 2_6.0],
            ['ru-en', 2_2.0],
            ['en-de', 2_2.0],
            ['de-en', 2_9.0],
        ] )
    @slow
    def test_bleu_scores ( self , pair , min_bleu_score ):
        """Translate the fixture sources for ``pair`` and check BLEU >= threshold."""
        # Fix: the original declared both parameters as ``_A`` (a SyntaxError)
        # and gave all three methods the same name, so only one survived.
        mname = F"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']
        batch = tokenizer(src_sentences , return_tensors='pt' , truncation=True , padding='longest' ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores['bleu'] , min_bleu_score )
| 534 | 0 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def _a ( function , starting_point , variable="x" , precision=10**-10 , multiplicity=1 , ) -> complex:
    """Find a root of ``function`` (a sympy-parsable expression in ``variable``)
    with the modified Newton-Raphson iteration starting at ``starting_point``.

    Raises ZeroDivisionError when the derivative vanishes at the current guess.
    """
    # Fix: the original declared all five parameters as ``_lowerCamelCase``
    # (a SyntaxError); names restored from the body's usage.
    symbol = symbols(variable )
    func = lambdify(symbol , function )
    diff_function = lambdify(symbol , diff(function , variable ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError("""Could not find root""" ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
# NOTE(review): ``newton_raphson`` is not defined in this module — the
# implementation above was mechanically renamed to ``_a``; confirm intent.
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}""",
    )
    # Find root of cos(x)
    print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 26 |
class snake_case_ :
    """A menu item with a name, a value (e.g. calories) and a weight (e.g. cost)."""

    def __init__( self , name , value , weight ):
        # Fix: the original declared all three parameters as ``__magic_name__``
        # (a SyntaxError); names restored from the attribute assignments.
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__( self ):
        return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value( self ):
        """Return the item's value."""
        return self.value

    def get_name( self ):
        """Return the item's name."""
        return self.name

    def get_weight( self ):
        """Return the item's weight."""
        return self.weight

    def value_weight( self ):
        """Return value per unit weight (the greedy selection key)."""
        # Fix: the four accessors originally all shared one (private) name and
        # shadowed each other, while the module functions below call
        # ``get_value``/``get_weight``; canonical names restored.
        return self.value / self.weight
def __a ( name , value , weight ):
    """Build a list of menu items from parallel name/value/weight lists."""
    # Fix: the original declared all three parameters as ``__UpperCAmelCase``
    # (a SyntaxError); names restored from the body's usage.
    menu = []
    for i in range(len(value ) ):
        # NOTE(review): ``Things`` is not defined in this (renamed) module —
        # the item class above is now ``snake_case_``; confirm which resolves.
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def __a ( items , max_cost , key_func ):
    """Greedy knapsack: take items in decreasing ``key_func`` order while the
    accumulated ``get_weight()`` total stays within ``max_cost``.

    Returns ``(chosen_items, total_value)``.
    """
    # Fix: the original declared all three parameters as ``__UpperCAmelCase``
    # (a SyntaxError) and unpacked ``0.0, 0.0`` into a single broken target.
    items_copy = sorted(items , key=key_func , reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
    for candidate in items_copy:
        if (total_cost + candidate.get_weight()) <= max_cost:
            result.append(candidate )
            total_cost += candidate.get_weight()
            total_value += candidate.get_value()
    return (result, total_value)
def __a ( ) -> None:
    """Placeholder with no behavior; the body is only this docstring, so the
    function always returns None."""
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 488 | 0 |
import json
import os
import torch
from diffusers import UNetaDModel
# Ensure the output directories for the converted checkpoints exist.
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def __lowerCamelCase (hor ):
    """Convert the original temporal-UNet checkpoint for horizon ``hor`` into a
    diffusers ``UNetaDModel`` and save its weights and config under ``hub/``.

    Raises ValueError for horizons other than 32 or 128.
    """
    # Fix: the original bound every intermediate to ``SCREAMING_SNAKE_CASE``
    # while reading the real names, and had no branch for unsupported horizons
    # (which previously surfaced later as a NameError).
    if hor == 1_2_8:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (3_2, 1_2_8, 2_5_6)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 3_2:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (3_2, 6_4, 1_2_8, 2_5_6)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    else:
        raise ValueError(F"unsupported horizon {hor}; expected 32 or 128" )
    model = torch.load(F"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" )
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 1_4,
        "out_channels": 1_4,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 6_5_5_3_6,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNetaDModel(**config )
    print(F"length of state dict: {len(state_dict.keys() )}" )
    print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
    # Remap original parameter names onto the diffusers model's names 1:1.
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , F"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" )
    with open(F"hub/hopper-medium-v2/unet/hor{hor}/config.json" , "w" ) as f:
        json.dump(config , f )
def __lowerCamelCase ():
    """Convert the original value-function checkpoint into a diffusers
    ``UNetaDModel`` and save its weights and config under ``hub/``.
    """
    # Fix: the original bound every intermediate to ``SCREAMING_SNAKE_CASE``
    # while the rest of the body read the real names (NameError at runtime).
    config = {
        "in_channels": 1_4,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (3_2, 6_4, 1_2_8, 2_5_6),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 6_5_5_3_6,
        "out_channels": 1_4,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    # The saved object here is already a state dict (unlike the unet converter).
    state_dict = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" )
    hf_value_function = UNetaDModel(**config )
    print(F"length of state dict: {len(state_dict.keys() )}" )
    print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" )
    with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f:
        json.dump(config , f )
# NOTE(review): ``unet`` and ``value_function`` are not defined in this module —
# both converter functions above were mechanically renamed to
# ``__lowerCamelCase`` (the second shadows the first); confirm intent.
if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
| 719 | import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
    """Holds the DPT image-processor hyperparameters used by the tests below."""

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=None , image_std=None , ):
        # Fix: the original declared every parameter as ``_UpperCamelCase``
        # (a SyntaxError); names restored from the attribute assignments.
        # Mutable list defaults replaced by the None-sentinel idiom.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict( self ):
        """Return the kwargs dict used to construct a DPTImageProcessor."""
        # Method name restored to match the call in the test class below.
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class lowercase ( a , unittest.TestCase ):
    """Runs the shared image-processing test-suite against DPTImageProcessor."""

    # NOTE(review): the mixin base class was mechanically renamed to ``a``.
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp( self ):
        # NOTE(review): ``DPTImageProcessingTester`` is not defined under that
        # name in this (renamed) module — the tester class above is ``lowercase``.
        self.image_processor_tester = DPTImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties( self ):
        """The processor exposes every configured attribute."""
        # Fix: the original methods all shared one (mangled) name, shadowing
        # each other, and bound locals that were then read under other names.
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )

    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )

    def test_call_pil( self ):
        """PIL inputs are batched into (B, C, H, W) tensors."""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def test_call_numpy( self ):
        """numpy inputs are batched into (B, C, H, W) tensors."""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def test_call_pytorch( self ):
        """torch inputs are batched into (B, C, H, W) tensors."""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
| 647 | 0 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCamelCase_ : Any = logging.get_logger(__name__)
def __a ( box , width , height ):
    """Normalize an ``(x0, y0, x1, y1)`` pixel box to the 0-1000 LayoutLM scale."""
    # Fix: the original declared all three parameters as ``_UpperCamelCase``
    # (a SyntaxError); names restored from the body's usage.
    return [
        int(1_000 * (box[0] / width) ),
        int(1_000 * (box[1] / height) ),
        int(1_000 * (box[2] / width) ),
        int(1_000 * (box[3] / height) ),
    ]
def __a ( image , lang , tesseract_config = None ):
    """Run Tesseract OCR on ``image`` and return ``(words, normalized_boxes)``.

    Boxes are ``(left, top, right, bottom)`` normalized to the 0-1000 scale;
    empty words (and their coordinates) are dropped.
    """
    # Fix: the original declared all three parameters as ``_UpperCamelCase``
    # (a SyntaxError) and referenced undefined ``a__`` names in the body.
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type="dict" , config=tesseract_config )
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates (set: O(1) membership)
    irrelevant_indices = {idx for idx, word in enumerate(words ) if not word.strip()}
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_boxes.append([x, y, x + w, y + h] )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        # NOTE(review): ``normalize_box`` was mechanically renamed (to ``__a``)
        # above — confirm this reference resolves at runtime.
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class _a ( __lowerCAmelCase ):
    """Image processor: optional resize, optional Tesseract OCR, RGB->BGR
    channel flip, and batching into a ``BatchFeature``.
    """

    # NOTE(review): presumably the base class's ``model_input_names``; the
    # attribute was mechanically renamed — confirm before relying on it.
    SCREAMING_SNAKE_CASE_ : Any = ["pixel_values"]

    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , apply_ocr = True , ocr_lang = None , tesseract_config = "" , **kwargs , ) -> None:
        # Fix: the original declared every parameter as ``_SCREAMING_SNAKE_CASE``
        # (a SyntaxError); names restored from the attribute assignments.
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ) -> np.ndarray:
        """Resize ``image`` to ``size`` (a dict with "height" and "width")."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}""" )
        output_size = (size["height"], size["width"])
        # ``resize`` here resolves to the module-level image_transforms helper.
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def preprocess( self , images , do_resize = None , size = None , resample = None , apply_ocr = None , ocr_lang = None , tesseract_config = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        """Prepare a batch of images (and, when OCR is enabled, words/boxes)."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if apply_ocr:
            requires_backends(self , "pytesseract" )
            words_batch = []
            boxes_batch = []
            for image in images:
                # NOTE(review): ``apply_tesseract`` was mechanically renamed
                # (to ``__a``) above — confirm this reference resolves.
                words, boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = BatchFeature(data={"pixel_values": images} , tensor_type=return_tensors )
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 185 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__lowerCAmelCase = TypeVar("""T""")
class lowerCamelCase_ ( Generic[T] ):
__lowercase : deque[T] # Cache store of keys
__lowercase : set[T] # References of the keys in cache
__lowercase : int = 10 # Maximum capacity of cache
def __init__( self , lowerCamelCase_ ) -> None:
"""simple docstring"""
_UpperCamelCase = deque()
_UpperCamelCase = set()
if not n:
_UpperCamelCase = sys.maxsize
elif n < 0:
raise ValueError("n should be an integer greater than 0." )
else:
_UpperCamelCase = n
def lowercase ( self , lowerCamelCase_ ) -> None:
"""simple docstring"""
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
_UpperCamelCase = self.dq_store.pop()
self.key_reference.remove(lowerCamelCase_ )
else:
self.dq_store.remove(lowerCamelCase_ )
self.dq_store.appendleft(lowerCamelCase_ )
self.key_reference.add(lowerCamelCase_ )
def lowercase ( self ) -> None:
"""simple docstring"""
for k in self.dq_store:
print(lowerCamelCase_ )
def __repr__( self ) -> str:
"""simple docstring"""
return f'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): ``LRUCache`` is not defined here (the class above was
    # renamed to ``lowerCamelCase_``), and the instance is bound to
    # ``__lowerCAmelCase`` while the lines below read ``lru_cache`` —
    # mechanical-rename breakage; confirm intent.
    __lowerCAmelCase = LRUCache(4)
    lru_cache.refer("""A""")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("""A""")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 147 | 0 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __snake_case ( unittest.TestCase ):
    """Generates ViT configs/inputs and shared model checks for the Flax tests."""

    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , ):
        # Fix: the original declared every parameter as ``a_`` (a SyntaxError)
        # and bound values to throwaway locals instead of attributes.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs( self ):
        """Return a ``(ViTConfig, pixel_values)`` pair for one test batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values

    def create_and_check_model( self , config , pixel_values ):
        model = FlaxViTModel(config=config )
        result = model(pixel_values )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )

    def create_and_check_for_image_classification( self , config , pixel_values ):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )

    def prepare_config_and_inputs_for_common( self ):
        """Return ``(config, inputs_dict)`` as expected by the common mixin."""
        config, pixel_values = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class __snake_case ( FlaxModelTesterMixin , unittest.TestCase ):
    """Runs the common Flax model test-suite plus ViT-specific checks."""

    # Fix: the base class was renamed to the undefined ``UpperCamelCase_``;
    # ``FlaxModelTesterMixin`` is what this file imports for that role.
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp( self ):
        # NOTE(review): ``FlaxViTModelTester`` is not defined under that name
        # in this (renamed) module — the tester class above is ``__snake_case``.
        self.model_tester = FlaxViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_jit_compilation( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted( pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )

                with self.subTest('JIT Enabled' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/vit-base-patch16-224' )
            outputs = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(outputs )
| 703 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCAmelCase_ ( function , starting_point , variable="x" , precision=10**-10 , multiplicity=1 , ) -> complex:
    """Find a root of ``function`` (a sympy-parsable expression in ``variable``)
    with the modified Newton-Raphson iteration starting at ``starting_point``.

    Raises ZeroDivisionError when the derivative vanishes at the current guess.
    """
    # Fix: the original declared all five parameters as ``snake_case__``
    # (a SyntaxError); names restored from the body's usage.
    symbol = symbols(variable )
    func = lambdify(symbol , function )
    diff_function = lambdify(symbol , diff(function , variable ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError('Could not find root' ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
# NOTE(review): ``newton_raphson`` is not defined in this module — the
# implementation above was mechanically renamed to ``UpperCAmelCase_``;
# confirm intent.
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"""{newton_raphson('exp(x) - 1', 1_0, precision=0.0_0_5)}""",
    )
    # Find root of cos(x)
    print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 604 | 0 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# Hard version pins: the conversion relies on internals of these exact releases.
if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Sample sentence used to compare the outputs of the original and converted models.
SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Convert the original (Gluonnlp-stored) Bort checkpoint at `bort_checkpoint_path` to a
    Hugging Face BERT checkpoint, save it to `pytorch_dump_folder_path`, and verify that
    both models produce (approximately) the same output tensors.
    """
    # Hyper-parameters of the released bort_4_8_768_1024 architecture.
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter                                                | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta`                                      | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma`                                     | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight`                                      | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight`                                          | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias`     | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight`   | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias`   | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias`   | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias`                   | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight`                 | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta`                  | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma`                 | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias`                   | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight`                 | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta`              | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma`             | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias`                        | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight`                      | `bert.encoder.layer.*.output.dense.weight`

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    # Defined locally so this function works even if the module-level constant is absent.
    sample_text = "The Nymphenburg Palace is a beautiful palace in Munich!"
    input_ids = tokenizer.encode_plus(sample_text)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(sample_text, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 5 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1_088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    """Convolution + batch norm + optional activation: the basic RegNet building block."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        # padding = kernel_size // 2 gives "same" spatial size for odd kernels (at stride 1).
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,  # bias is redundant before batch norm
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """RegNet stem: a single 3x3, stride-2 convolution applied to the pixel values."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        # Guard against inputs whose channel count does not match the configuration.
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """1x1 strided convolution + batch norm, used to project the residual to the
    correct size/channels when the main branch changes shape."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation layer: global-pool, bottleneck MLP, sigmoid gate
    applied channel-wise to the input."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """RegNet's "X" block: 1x1 reduce -> grouped 3x3 -> 1x1 expand, with a residual
    shortcut (projected when shape changes). No Squeeze-and-Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        # Group width from config; at least one group.
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """RegNet's "Y" block: an X block with a Squeeze-and-Excitation layer inserted
    after the grouped 3x3 convolution."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """A RegNet stage: `depth` X or Y layers, with downsampling in the first layer."""

    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer

        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    """Stack of RegNet stages; optionally collects the hidden state after each stage."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        # `hidden_states` accumulates the input of each stage plus the final output.
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for
    downloading and loading pretrained models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # He initialization for convolutions, unit weight / zero bias for norms.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # NOTE(review): original names were obfuscated; upstream toggles the flag on the
        # model module — confirm the isinstance target against the installed transformers version.
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'

REGNET_INPUTS_DOCSTRING = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        # Fall back to config defaults when the flags are not given explicitly.
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype / label count, then cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 226 | 0 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Optimize `search_prob` (a `SearchProblem`-like state with ``score()``,
    ``get_neighbors()`` and ``x``/``y`` attributes) with simulated annealing.

    Args:
        search_prob: Starting state of the search.
        find_max: Maximize the score when True, minimize when False.
        max_x, min_x, max_y, min_y: Bounds; neighbors outside are skipped.
        visualization: Plot score per iteration with matplotlib when True.
        start_temperate: Initial temperature.
        rate_of_decrease: Fraction by which the temperature decays per iteration.
        threshold_temp: Search stops once the temperature falls below this.

    Returns:
        The best-scoring state visited during the search.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )
| 273 |
'''simple docstring'''
# Baconian cipher alphabet. Note the historical quirks: "j" and "v" use
# B-prefixed codes instead of following the alphabetical A/B progression.
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}


decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode `word` (letters and spaces only, case-insensitive) with the Baconian cipher.

    Raises:
        Exception: If `word` contains anything other than letters and spaces.
    """
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-cipher string consisting of 'A', 'B' and spaces.

    Raises:
        Exception: If `coded` contains characters other than 'A', 'B' and spaces.
    """
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        # Consume the word five characters at a time; each group maps to one letter.
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
    # Run any doctest examples defined in this module when executed as a script.
    from doctest import testmod
    testmod()
| 273 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class _lowerCAmelCase(snake_case_):
    """Configuration for REALM models (embedder/encoder/scorer/reader/open-QA).

    Fixes restored from the mangled source:
    * the ``__init__`` signature repeated the same parameter name, a
      SyntaxError; parameter names are restored from the assignment comments
      and defaults (``vocab_size=30522`` ... ``eos_token_id=2``);
    * every hyper-parameter was assigned to a throwaway local (``snake_case``)
      instead of ``self``, so the config object never stored anything.
    """

    # Key used by the PretrainedConfig machinery for auto-class lookup; the
    # previous mangled `__UpperCAmelCase` attribute name was never read.
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 178 |
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
__snake_case = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

# Task name -> Auto* head class; `BaseTransformer.__init__` indexes this via
# its `mode` argument.  (The scrambled source bound every constant below to
# the single `__snake_case` name, so each assignment clobbered the previous
# one and `MODEL_MODES` / `arg_to_scheduler` — both referenced by the code
# below — were undefined.)
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeqaSeqLM,
    "translation": AutoModelForSeqaSeqLM,
}

# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
# Names used by `add_model_specific_args` for the --lr_scheduler option.
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    """Generic Lightning wrapper around a Hugging Face transformer.

    Restored under the name ``BaseTransformer`` — ``generic_train`` below
    annotates its first argument with exactly that name.  The mangled source
    repeated parameter names (a SyntaxError), stored state in throwaway locals
    instead of ``self``, and gave every method the same name (so all but the
    last were unreachable); names are restored from how each body is used.
    """

    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Build config/tokenizer/model from `hparams.model_name_or_path` unless instances are supplied."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        # Forward selected dropout hyper-parameters from hparams to the model config.
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        """Replace `self.model` with a freshly loaded pretrained checkpoint."""
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        """Build the per-step LR scheduler selected by ``--lr_scheduler``."""
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer (AdamW or Adafactor) and warmup/decay schedule."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            # NOTE(review): fixed external LR implies scale_parameter/relative_step
            # disabled — confirm against the training recipe.
            optimizer = Adafactor(
                optimizer_grouped_parameters,
                lr=self.hparams.learning_rate,
                scale_parameter=False,
                relative_step=False,
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run (for the LR scheduler)."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        """Record `dataset_size` (and cache the train loader) for `total_steps`."""
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path, batch_size, shuffle=False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        """Path of the cached-features file for `mode` under ``--data_dir``."""
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        """Alongside the Lightning checkpoint, export model + tokenizer to `best_tfmr`."""
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Register model-related CLI options on `parser`."""
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(root_dir).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    """Initializes the RAG retriever once, on the master worker.

    Restored under the name ``InitCallback``, which ``generic_train`` below
    instantiates.  The mangled method repeated a parameter name (SyntaxError)
    and used a non-hook name, so Lightning would never have called it.
    NOTE(review): hook restored as ``on_sanity_check_start`` — confirm against
    the training entry point.
    """

    def on_sanity_check_start(self, trainer, pl_module):
        # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
        if trainer.is_global_zero and trainer.global_rank == 0:
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback):
    """After the backward pass, prints every RAG parameter without a gradient."""

    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                # The mangled body printed an ambiguous placeholder; the useful
                # datum here is the parameter's name.
                print(name)
class LoggingCallback(pl.Callback):
    """Logs per-group learning rates each batch and dumps validation/test metrics.

    Restored under the name ``LoggingCallback``, which ``generic_train`` below
    instantiates as the default logging callback; the mangled methods repeated
    parameter names (SyntaxError) and used non-hook names.
    """

    def on_batch_end(self, trainer, pl_module):
        # Log the current learning rate of every optimizer param group.
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    """Register task-independent CLI options on `parser`.

    Fixes: the scrambled signature repeated the name ``lowercase`` (a
    SyntaxError), and both this function and ``generic_train`` shared the
    mangled name ``__lowerCAmelCase`` so this one was shadowed at import time.
    """
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=None,
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    """Seed, assemble callbacks and a ``pl.Trainer``, optionally fit, return the trainer.

    Fixes: distinct parameter names (the scrambled signature repeated
    ``lowercase``, a SyntaxError), ``args.fp16`` instead of the mangled
    ``args.fpaa``, and no mutable ``[]`` default for ``extra_callbacks``.
    """
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # Copy so appending the early-stopping callback never mutates a caller's list.
    extra_callbacks = list(extra_callbacks) if extra_callbacks else []

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
| 178 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module logger for the MaskFormer Swin configuration.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the MaskFormer Swin backbone.

    Fixes restored from the mangled source:
    * the base list was ``(_a, _a)`` — the same (undefined) name twice, which
      Python rejects as a duplicate base class; the bases are restored to the
      two classes this module imports for exactly this purpose;
    * the ``__init__`` signature repeated the same parameter name (a
      SyntaxError) and assigned every value to a throwaway local
      (``_lowercase``) instead of ``self``, so no hyper-parameter was stored.
    """

    model_type = "maskformer-swin"

    # PretrainedConfig attribute aliases (generic name -> Swin-specific name).
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],  # list defaults kept for interface compatibility; treated as read-only
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 557 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    """Builds tiny ViT configs and inputs for the model tests below.

    Restored under the name ``TFViTModelTester`` — the test class's ``setUp``
    instantiates ``TFViTModelTester(self)``.  The mangled ``__init__`` repeated
    the same parameter name (a SyntaxError) and stored every value in a
    throwaway local instead of on ``self``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None unless `use_labels`."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model tests applied to ViT.

    Fixes restored from the mangled source: the base list repeated ``_a`` (a
    duplicate base class, rejected at class creation) and is restored to the
    two mixins this module imports; test methods are restored to ``test_*``
    names so unittest actually discovers them.
    """

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
self.assertIsNotNone(snake_case)
def prepare_img():
    """Load the standard COCO fixture image used by the slow integration test.

    Restored under the name ``prepare_img`` — the integration test below calls
    ``prepare_img()``, which the mangled ``_snake_case`` name left unresolved.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_(unittest.TestCase):
    """Slow end-to-end check of the pretrained TF ViT classification head.

    The test method is restored to a ``test_*`` name so unittest discovers it
    (the mangled name was never run).
    """

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 557 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.