def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb `number_of_steps` stairs taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    # Fibonacci-style iteration: ways(n) = ways(n - 1) + ways(n - 2)
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
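A quick sanity check of the recurrence; the expected values follow directly from the Fibonacci-style update above:

```python
# Expected counts for the classic climbing-stairs problem.
assert climb_stairs(1) == 1
assert climb_stairs(2) == 2
assert climb_stairs(5) == 8
```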
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    """Configuration for a SegFormer model; the defaults match the smallest variant."""

    model_type = "segformer"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
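A minimal usage sketch, assuming the `transformers` package is installed; the defaults above correspond to the smallest SegFormer configuration:

```python
from transformers import SegformerConfig

config = SegformerConfig()      # defaults as defined in __init__ above
print(config.model_type)        # "segformer"
print(config.hidden_sizes)      # [32, 64, 160, 256]
```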
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
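The module above registers its exports lazily: nothing under `tokenization_bartpho` is imported until an attribute is first accessed. A simplified, self-contained sketch of that idea (the real `_LazyModule` in transformers handles more cases; the names here are illustrative):

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Import a submodule only when one of its exported names is first accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        submodule = self._symbol_to_module.get(symbol)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, symbol)
```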
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCAmelCase = {'input_ids': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCAmelCase,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False  # MRA does not output attentions
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
_A = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
        self.run_and_check(_A)
_A = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(_A)
_A = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(_A)
"""simple docstring"""
import numpy
# List of input, output pairs
a = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
a = (((515, 22, 13), 555), ((61, 35, 49), 150))
a = [2, 4, 1, 5]
a = len(train_data)
a = 0.0_0_9
def _error(example_no, data_set="train"):
    """Error = hypothesis output - actual output for the given example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Linear hypothesis: theta_0 + sum_i theta_(i+1) * x_i."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    """Actual output value for the given example."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the given example."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """Sum of the cost-derivative terms; index == -1 corresponds to the bias."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
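Each pass of the `while` loop applies the standard batch gradient descent update to every parameter; with learning rate $\alpha$ (`LEARNING_RATE`) and $m$ training examples:

$$\theta_i \leftarrow \theta_i - \alpha \cdot \frac{1}{m} \sum_{j=1}^{m} \bigl(h_\theta(x^{(j)}) - y^{(j)}\bigr)\, x_i^{(j)}$$

where $x_0^{(j)} = 1$, so the $i = 0$ update (the `index == -1` branch in `summation_of_cost_derivative`) adjusts the bias term.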
def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp: int) -> bool:
    """Checks whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False
def is_chinese(word: str) -> int:
    """Returns 1 if every character of the word is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]) -> List[str]:
    """Collects the multi-character Chinese words among the given tokens."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    """Prefixes with '##' the continuation pieces of whole Chinese words."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
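A small, hand-picked illustration of what `add_sub_symbol` produces (tokens chosen for the example; they are not from a real vocabulary):

```python
# "身高" was segmented as one word by LTP, so its second character
# receives the "##" continuation prefix used for whole word masking.
bert_tokens = ["我", "的", "身", "高"]
print(add_sub_symbol(bert_tokens, {"身高"}))
# -> ['我', '的', '身', '##高']
```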
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer) -> List[List[int]]:
    """For each line, returns the positions of Chinese subword continuations."""
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
main(args)
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        # Separate punctuation and apostrophes before applying merges.
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # Merge the lowest-ranked (earliest-learned) pair first.
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
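The `bpe` method above is the classic byte-pair-encoding merge loop: repeatedly merge the adjacent pair with the lowest learned rank until no known pair remains. A standalone sketch of just that loop, with a toy merge table instead of a real `merges.txt`:

```python
def toy_bpe(token: str, bpe_ranks: dict) -> list:
    """Greedily apply the earliest-learned (lowest-ranked) merge."""
    word = list(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break  # no learned merge applies
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    return word


# Toy table: the pair ("l", "o") was learned first, then ("lo", "w").
print(toy_bpe("low", {("l", "o"): 0, ("lo", "w"): 1}))  # -> ['low']
```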
"""Approximate the square root of a number with Newton's method."""
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start
def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
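The update inside the loop is Newton's iteration for $f(x) = x^2 - a$, which reduces to the Babylonian (Heron) rule:

$$x_{n+1} = x_n - \frac{x_n^2 - a}{2x_n} = \frac{1}{2}\left(x_n + \frac{a}{x_n}\right)$$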
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->int:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz") , ["ah", "\u535A", "\u63A8", "zz"])
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Any =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["hello", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Dict =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hällo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["h\u00E9llo"])
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Tuple =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Dict =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["HeLLo", "!", "how", "Are", "yoU", "?"])
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->int:
'''simple docstring'''
tokenizer = RoCBertBasicTokenizer(do_lower_case=False , strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HäLLo", "!", "how", "Are", "yoU", "?"])
def SCREAMING_SNAKE_CASE_ (self : int) ->str:
'''simple docstring'''
tokenizer = RoCBertBasicTokenizer(do_lower_case=False , strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HaLLo", "!", "how", "Are", "yoU", "?"])
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
tokenizer = RoCBertBasicTokenizer(do_lower_case=False , never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]") , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
def SCREAMING_SNAKE_CASE_ (self : Dict) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCamelCase__: Dict ={}
for i, token in enumerate(UpperCAmelCase_):
lowerCamelCase__: Any =i
lowerCamelCase__: Union[str, Any] =RoCBertWordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("") , [])
self.assertListEqual(tokenizer.tokenize("unwanted running") , ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running") , ["[UNK]", "runn", "##ing"])
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->List[str]:
'''simple docstring'''
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[str]:
'''simple docstring'''
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->int:
'''simple docstring'''
tokenizer = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]])
if self.test_rust_tokenizer:
rust_tokenizer = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]])
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
sentence = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
tokens = tokenizer_r.encode_plus(
    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , "do_lower_case") else False
expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"])
def SCREAMING_SNAKE_CASE_ (self : str) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =["的", "人", "有"]
lowerCamelCase__: List[str] ="".join(UpperCAmelCase_)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
lowerCamelCase__: Dict =True
lowerCamelCase__: Optional[Any] =self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: int =tokenizer_p.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
lowerCamelCase__: List[Any] =tokenizer_r.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
lowerCamelCase__: Tuple =tokenizer_r.convert_ids_to_tokens(UpperCAmelCase_)
lowerCamelCase__: List[str] =tokenizer_p.convert_ids_to_tokens(UpperCAmelCase_)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =False
lowerCamelCase__: Tuple =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: List[Any] =self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Tuple =tokenizer_r.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
lowerCamelCase__: str =tokenizer_p.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =tokenizer_r.convert_ids_to_tokens(UpperCAmelCase_)
lowerCamelCase__: Tuple =tokenizer_p.convert_ids_to_tokens(UpperCAmelCase_)
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase__: List[str] =[
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase_)
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict:
'''simple docstring'''
tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
text = tokenizer.encode("你好" , add_special_tokens=False)
text_2 = tokenizer.encode("你是谁" , add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_2 + [2]
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->str:
'''simple docstring'''
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
    with self.subTest(f"""{tokenizer.__class__.__name__}"""):
        string_sequence = "你好,你是谁"
        tokens = tokenizer.tokenize(string_sequence)
        tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
        tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
        tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
        prepared_input_dict = tokenizer.prepare_for_model(
            tokens_ids , tokens_shape_ids , tokens_proun_ids , add_special_tokens=True)
        input_dict = tokenizer.encode_plus(string_sequence , add_special_tokens=True)
        self.assertEqual(input_dict , prepared_input_dict)
| 59 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    '''simple docstring'''
    slow_tokenizer_class = CustomTokenizer
    pass
| 59 | 1 |
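The wordpiece test above ("unwanted" → un/##want/##ed) exercises greedy longest-match-first subword splitting. A minimal self-contained sketch of that algorithm, with a toy vocabulary as an illustrative assumption:

def wordpiece_tokenize(text, vocab, unk_token="[UNK]", max_chars=100):
    # Greedy longest-match-first: take the longest vocab entry that prefixes
    # the remaining characters; non-initial pieces carry the "##" marker.
    output = []
    for word in text.split():
        if len(word) > max_chars:
            output.append(unk_token)
            continue
        start, pieces, is_bad = 0, [], False
        while start < len(word):
            end, cur = len(word), None
            while start < end:
                piece = ("##" if start > 0 else "") + word[start:end]
                if piece in vocab:
                    cur = piece
                    break
                end -= 1
            if cur is None:
                is_bad = True
                break
            pieces.append(cur)
            start = end
        output.extend([unk_token] if is_bad else pieces)
    return output

toy_vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(wordpiece_tokenize("unwanted running", toy_vocab))   # ['un', '##want', '##ed', 'runn', '##ing']
print(wordpiece_tokenize("unwantedX running", toy_vocab))  # ['[UNK]', 'runn', '##ing']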
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq-only keys that have no HF equivalent."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free LM head whose weights are tied to the embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    """Load a fairseq mBART checkpoint and rebuild it as an HF MBartForConditionalGeneration."""
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 717 |
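The `make_linear_from_emb` helper in the conversion script above ties the LM head to the shared embedding matrix so input and output embeddings reuse one weight. A minimal sketch of the same idea with toy sizes (the dimensions are illustrative assumptions):

import torch
from torch import nn

emb = nn.Embedding(num_embeddings=1000, embedding_dim=64)
lm_head = nn.Linear(64, 1000, bias=False)
lm_head.weight = emb.weight  # share one Parameter instead of copying data

hidden = torch.randn(2, 5, 64)
print(lm_head(hidden).shape)                                # torch.Size([2, 5, 1000])
print(lm_head.weight.data_ptr() == emb.weight.data_ptr())   # True: the weights are tied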
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield (job title, company name) pairs scraped from Indeed for the given location."""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 408 | 0 |
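The scraper above depends on Indeed's live markup, which changes frequently and needs network access. The same `find_all`/`attrs` pattern can be tried offline against an inline HTML string (the markup below is a made-up stand-in, assuming `beautifulsoup4` is installed):

from bs4 import BeautifulSoup

html = """
<div data-tn-component="organicJob">
  <a data-tn-element="jobTitle"> Android Developer </a>
  <span class="company"> Acme Apps </span>
</div>
"""
soup = BeautifulSoup(html, "html.parser")
for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
    title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
    company = job.find("span", {"class": "company"}).text.strip()
    print(f"{title} at {company}")  # Android Developer at Acme Apps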
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}
PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , snake_case__ , snake_case__="[SEP]" , snake_case__="[SEP]" , snake_case__="[SEP]" , snake_case__="[UNK]" , snake_case__="[PAD]" , snake_case__="[CLS]" , snake_case__="[MASK]" , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
lowerCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
lowerCAmelCase : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowerCAmelCase : int = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
lowerCAmelCase : Dict = f"""[unused{i}]"""
lowerCAmelCase : Optional[int] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowerCAmelCase : List[Any] = 12
lowerCAmelCase : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(snake_case__ )
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.__dict__.copy()
lowerCAmelCase : str = None
return state
def __setstate__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase : Tuple = {}
lowerCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False):
    """simple docstring"""
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens)
    if token_ids_1 is None:
        return ([0] * len(token_ids_0)) + [1]
    return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None):
    """simple docstring"""
    sep = [self.sep_token_id]
    if token_ids_1 is None:
        return len(token_ids_0 + sep) * [0]
    return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def lowercase__ ( self ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase : Union[str, Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def convert_tokens_to_string(self , tokens):
    """simple docstring"""
    out_string = "".join(tokens).replace(SPIECE_UNDERLINE , " ").strip()
    return out_string
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase : Dict = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None):
    """simple docstring"""
    if token_ids_1 is None:
        return token_ids_0 + [self.sep_token_id]
    sep = [self.sep_token_id]
    return token_ids_0 + sep + token_ids_1 + sep
| 645 |
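The tokenizer above shifts every SentencePiece id by `fairseq_offset = 12` so the first model-vocabulary slots stay reserved for special and `[unused]` tokens. The shift logic in isolation, as a small runnable sketch (the toy table mirrors the constructor above):

fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10):
    fairseq_tokens_to_ids[f"[unused{i}]"] = 5 + i
fairseq_offset = 12
unk_token_id = fairseq_tokens_to_ids["[UNK]"]

def spm_id_to_model_id(spm_id: int) -> int:
    # SentencePiece reserves id 0 for its own <unk>; everything else shifts up.
    return spm_id + fairseq_offset if spm_id else unk_token_id

print(spm_id_to_model_id(3))  # 15: the first "real" piece (",") lands after the reserved slots
print(spm_id_to_model_id(0))  # 3: unknown pieces collapse onto [UNK]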
import torch
from torch import nn
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case=1 , snake_case=False ):
super().__init__()
lowercase = n_token
lowercase = d_embed
lowercase = d_proj
lowercase = cutoffs + [n_token]
lowercase = [0] + self.cutoffs
lowercase = div_val
lowercase = self.cutoffs[0]
lowercase = len(self.cutoffs ) - 1
lowercase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowercase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowercase = nn.Parameter(torch.zeros(self.n_clusters ) )
lowercase = nn.ModuleList()
lowercase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case , snake_case ) ) )
else:
self.out_projs.append(snake_case )
self.out_layers.append(nn.Linear(snake_case , snake_case ) )
else:
for i in range(len(self.cutoffs ) ):
lowercase , lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case , snake_case ) ) )
self.out_layers.append(nn.Linear(snake_case , r_idx - l_idx ) )
lowercase = keep_order
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case ):
if proj is None:
lowercase = nn.functional.linear(snake_case , snake_case , bias=snake_case )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowercase = nn.functional.linear(snake_case , proj.t().contiguous() )
lowercase = nn.functional.linear(snake_case , snake_case , bias=snake_case )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=None , snake_case=False ):
if labels is not None:
# Shift so that tokens < n predict n
lowercase = hidden[..., :-1, :].contiguous()
lowercase = labels[..., 1:].contiguous()
lowercase = hidden.view(-1 , hidden.size(-1 ) )
lowercase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowercase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowercase = self._compute_logit(snake_case , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowercase = labels != -100
lowercase = torch.zeros_like(snake_case , dtype=hidden.dtype , device=hidden.device )
lowercase = (
-nn.functional.log_softmax(snake_case , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowercase = nn.functional.log_softmax(snake_case , dim=-1 )
else:
# construct weights and biases
lowercase , lowercase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowercase , lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase = self.out_layers[0].weight[l_idx:r_idx]
lowercase = self.out_layers[0].bias[l_idx:r_idx]
else:
lowercase = self.out_layers[i].weight
lowercase = self.out_layers[i].bias
if i == 0:
lowercase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowercase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(snake_case )
biases.append(snake_case )
lowercase , lowercase , lowercase = weights[0], biases[0], self.out_projs[0]
lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
lowercase = nn.functional.log_softmax(snake_case , dim=1 )
if labels is None:
lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowercase = torch.zeros_like(snake_case , dtype=hidden.dtype , device=hidden.device )
lowercase = 0
lowercase = [0] + self.cutoffs
for i in range(len(snake_case ) - 1 ):
lowercase , lowercase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowercase = (labels >= l_idx) & (labels < r_idx)
lowercase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowercase = labels.index_select(0 , snake_case ) - l_idx
lowercase = head_logprob.index_select(0 , snake_case )
lowercase = hidden.index_select(0 , snake_case )
else:
lowercase = hidden
if i == 0:
if labels is not None:
lowercase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowercase = head_logprob[:, : self.cutoffs[0]]
else:
lowercase , lowercase , lowercase = weights[i], biases[i], self.out_projs[i]
lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
lowercase = nn.functional.log_softmax(snake_case , dim=1 )
lowercase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowercase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowercase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowercase = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , snake_case , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if self.n_clusters == 0:
lowercase = self._compute_logit(snake_case , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(snake_case , dim=-1 )
else:
# construct weights and biases
lowercase , lowercase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowercase , lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase = self.out_layers[0].weight[l_idx:r_idx]
lowercase = self.out_layers[0].bias[l_idx:r_idx]
else:
lowercase = self.out_layers[i].weight
lowercase = self.out_layers[i].bias
if i == 0:
lowercase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowercase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(snake_case )
biases.append(snake_case )
lowercase , lowercase , lowercase = weights[0], biases[0], self.out_projs[0]
lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowercase = nn.functional.log_softmax(snake_case , dim=1 )
lowercase = [0] + self.cutoffs
for i in range(len(snake_case ) - 1 ):
lowercase , lowercase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowercase = head_logprob[:, : self.cutoffs[0]]
else:
lowercase , lowercase , lowercase = weights[i], biases[i], self.out_projs[i]
lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
lowercase = nn.functional.log_softmax(snake_case , dim=1 )
lowercase = head_logprob[:, -i] + tail_logprob_i
lowercase = logprob_i
return out
| 84 | 0 |
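The module above is a projected adaptive softmax in the Transformer-XL style. PyTorch ships a closely related building block, `nn.AdaptiveLogSoftmaxWithLoss`, which likewise clusters the vocabulary so rare tokens go through smaller projections; a minimal usage sketch (sizes and cutoffs are arbitrary assumptions):

import torch
from torch import nn

adaptive = nn.AdaptiveLogSoftmaxWithLoss(
    in_features=64, n_classes=10_000, cutoffs=[100, 1_000], div_value=4.0
)
hidden = torch.randn(32, 64)                 # one hidden state per target position
targets = torch.randint(0, 10_000, (32,))
out = adaptive(hidden, targets)
print(out.output.shape, out.loss.item())     # per-sample target log-probs and mean NLL
print(adaptive.log_prob(hidden).shape)       # full (32, 10000) log-distribution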
def lowerCamelCase_ ( A : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowerCAmelCase_ = set()
# Replace all the whitespace in our sentence
lowerCAmelCase_ = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(A ) == 26
def lowerCamelCase_ ( A : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowerCAmelCase_ = [False] * 26
for char in input_str:
if char.islower():
lowerCAmelCase_ = True
elif char.isupper():
lowerCAmelCase_ = True
return all(A )
def lowerCamelCase_ ( A : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def lowerCamelCase_ ( ):
"""simple docstring"""
from timeit import timeit
lowerCAmelCase_ = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=A ) )
print(timeit('''is_pangram_faster()''' , setup=A ) )
print(timeit('''is_pangram_fastest()''' , setup=A ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 721 |
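An even shorter variant of the checks above compares character sets directly via the standard library; a sketch equivalent in result to `is_pangram_fastest`:

import string

def is_pangram_set(sentence: str = "The quick brown fox jumps over the lazy dog") -> bool:
    # A pangram covers every letter a-z at least once.
    return set(string.ascii_lowercase) <= set(sentence.lower())

print(is_pangram_set())         # True
print(is_pangram_set("hello"))  # False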
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    """Build a toy model/optimizer/scheduler/dataloader bundle for the tests below."""
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.0_1, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature(model):
    """Cheap scalar fingerprint of a linear model's parameters."""
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights(model):
    """Overwrite the model with freshly initialized weights of the same shape."""
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class UpperCamelCase_ ( A ):
'''simple docstring'''
@require_cuda
def lowercase__ ( self):
lowerCAmelCase_ = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(_UpperCAmelCase):
lowerCAmelCase_ = Accelerator(cpu=_UpperCAmelCase)
def lowercase__ ( self):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ = GradientState()
assert state.num_steps == 1
lowerCAmelCase_ = 4
assert state.num_steps == 4
assert state.sync_gradients is True
lowerCAmelCase_ = False
assert state.sync_gradients is False
GradientState._reset_state()
def lowercase__ ( self):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = create_components()
(
(
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) ,
) = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
self.assertTrue(prepared_model in accelerator._models)
self.assertTrue(prepared_optimizer in accelerator._optimizers)
self.assertTrue(prepared_scheduler in accelerator._schedulers)
self.assertTrue(prepared_train_dl in accelerator._dataloaders)
self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
def lowercase__ ( self):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = create_components()
accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
accelerator.free_memory()
self.assertTrue(len(accelerator._models) == 0)
self.assertTrue(len(accelerator._optimizers) == 0)
self.assertTrue(len(accelerator._schedulers) == 0)
self.assertTrue(len(accelerator._dataloaders) == 0)
def lowercase__ ( self):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*_UpperCAmelCase , **_UpperCAmelCase):
pass
with patch('''torch.cuda.set_device''' , _UpperCAmelCase), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64'''):
lowerCAmelCase_ = Accelerator()
self.assertEqual(str(accelerator.state.device) , '''cuda:64''')
def lowercase__ ( self):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = create_components()
accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
lowerCAmelCase_ = get_signature(_UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_UpperCAmelCase)
# make sure random weights don't match
load_random_weights(_UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(_UpperCAmelCase)) > 1E-3)
# make sure loaded weights match
accelerator.load_state(_UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(_UpperCAmelCase)) < 1E-3)
def lowercase__ ( self):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = create_components()
accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
lowerCAmelCase_ = get_signature(_UpperCAmelCase)
# saving hook
def save_config(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
lowerCAmelCase_ = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(_UpperCAmelCase , '''data.json''') , '''w''') as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
# loading hook
def load_config(_UpperCAmelCase , _UpperCAmelCase):
with open(os.path.join(_UpperCAmelCase , '''data.json''') , '''r''') as f:
lowerCAmelCase_ = json.load(_UpperCAmelCase)
lowerCAmelCase_ = config['''class_name''']
lowerCAmelCase_ = accelerator.register_save_state_pre_hook(_UpperCAmelCase)
lowerCAmelCase_ = accelerator.register_load_state_pre_hook(_UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_UpperCAmelCase)
# make sure random weights don't match with hooks
load_random_weights(_UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(_UpperCAmelCase)) > 1E-3)
# random class name to verify correct one is loaded
lowerCAmelCase_ = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(_UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(_UpperCAmelCase)) < 1E-3)
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__)
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_UpperCAmelCase)
# make sure random weights don't match with hooks removed
load_random_weights(_UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(_UpperCAmelCase)) > 1E-3)
# random class name to verify correct one is loaded
lowerCAmelCase_ = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(_UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(_UpperCAmelCase)) < 1E-3)
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__)
def lowercase__ ( self):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = create_components()
lowerCAmelCase_ = None
# This should work
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
self.assertTrue(dummy_obj is None)
def lowercase__ ( self):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = create_components()
lowerCAmelCase_ = [1, 2, 3]
# This should work
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(
getattr(_UpperCAmelCase , '''_is_accelerate_prepared''' , _UpperCAmelCase) , _UpperCAmelCase , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(_UpperCAmelCase , '''_is_accelerate_prepared''' , _UpperCAmelCase) , _UpperCAmelCase , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(_UpperCAmelCase , '''_is_accelerate_prepared''' , _UpperCAmelCase) , _UpperCAmelCase , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(_UpperCAmelCase , '''_is_accelerate_prepared''' , _UpperCAmelCase) , _UpperCAmelCase , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(_UpperCAmelCase , '''_is_accelerate_prepared''' , _UpperCAmelCase) , _UpperCAmelCase , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(_UpperCAmelCase , '''_is_accelerate_prepared''' , _UpperCAmelCase) , _UpperCAmelCase , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def lowercase__ ( self):
from transformers import AutoModelForCausalLM
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=_UpperCAmelCase , device_map={'''''': 0} , )
lowerCAmelCase_ = Accelerator()
# This should work
lowerCAmelCase_ = accelerator.prepare(_UpperCAmelCase)
@slow
@require_bnb
def lowercase__ ( self):
from transformers import AutoModelForCausalLM
lowerCAmelCase_ = Accelerator()
with init_empty_weights():
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
lowerCAmelCase_ = infer_auto_device_map(_UpperCAmelCase)
lowerCAmelCase_ = '''cpu'''
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , llm_inta_enable_fpaa_cpu_offload=_UpperCAmelCase)
# This should not work and get value error
with self.assertRaises(_UpperCAmelCase):
lowerCAmelCase_ = accelerator.prepare(_UpperCAmelCase)
@slow
@require_bnb
@require_multi_gpu
def lowercase__ ( self):
from transformers import AutoModelForCausalLM
lowerCAmelCase_ = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
lowerCAmelCase_ = infer_auto_device_map(_UpperCAmelCase)
lowerCAmelCase_ = 1
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=_UpperCAmelCase , device_map=_UpperCAmelCase , )
lowerCAmelCase_ = Accelerator()
# This should not work and get value error
with self.assertRaises(_UpperCAmelCase):
lowerCAmelCase_ = accelerator.prepare(_UpperCAmelCase)
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def lowercase__ ( self):
from transformers import AutoModelForCausalLM
with init_empty_weights():
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
lowerCAmelCase_ = infer_auto_device_map(_UpperCAmelCase)
lowerCAmelCase_ = 1
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=_UpperCAmelCase , device_map=_UpperCAmelCase , )
lowerCAmelCase_ = Accelerator()
# This should work
lowerCAmelCase_ = accelerator.prepare(_UpperCAmelCase)
@require_cuda
def lowercase__ ( self):
lowerCAmelCase_ = torch.nn.Linear(10 , 10)
lowerCAmelCase_ = torch.optim.SGD(model.parameters() , lr=0.01)
lowerCAmelCase_ = Accelerator(cpu=_UpperCAmelCase)
lowerCAmelCase_ = accelerator.prepare(_UpperCAmelCase)
| 413 | 0 |
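The tests above exercise `Accelerator.prepare`, checkpoint round-trips, and save/load hooks. For reference, a minimal end-to-end sketch of how the same objects are used in a training loop (the model, data, and hyperparameters are toy assumptions; requires the `accelerate` package):

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
loader = DataLoader(TensorDataset(torch.randn(8, 2), torch.randn(8, 4)), batch_size=4)

model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
for x, y in loader:
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    accelerator.backward(loss)  # replaces loss.backward() for mixed-precision/distributed runs
    optimizer.step()
accelerator.save_state("checkpoint_dir")  # the state that the tests above round-trip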
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """Map an original YOSO checkpoint key onto the HF naming scheme."""
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename every tensor and add the derived entries the HF model expects."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path) | 108 |
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }') | 162 | 0 |
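The constant-time solution above rests on the two closed forms, whose difference also factors nicely:

\sum_{i=1}^{n} i = \frac{n(n+1)}{2}, \qquad
\sum_{i=1}^{n} i^{2} = \frac{n(n+1)(2n+1)}{6}, \qquad
\left(\sum_{i=1}^{n} i\right)^{2} - \sum_{i=1}^{n} i^{2} = \frac{n(n+1)}{2}\cdot\frac{(n-1)(3n+2)}{6}.

For n = 100 both the code and the closed form give 25164150.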
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2
def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])
def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)
def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 721 |
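A quick way to sanity-check the divide-and-conquer implementation above is to compare it against the O(n²) brute force on random input; a small sketch, assuming it runs in the same module as `closest_pair_of_points`:

import random

def closest_pair_brute_force(pts):
    best = float("inf")
    for i in range(len(pts) - 1):
        for j in range(i + 1, len(pts)):
            dx, dy = pts[i][0] - pts[j][0], pts[i][1] - pts[j][1]
            best = min(best, (dx * dx + dy * dy) ** 0.5)
    return best

sample = [(random.uniform(0, 100), random.uniform(0, 100)) for _ in range(50)]
assert abs(closest_pair_of_points(sample, len(sample)) - closest_pair_brute_force(sample)) < 1e-9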
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
    feature_extractor = None
    if "feature_extractor" in kwargs:
        warnings.warn(
            "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
            " instead." , FutureWarning , )
        feature_extractor = kwargs.pop("feature_extractor")
    image_processor = image_processor if image_processor is not None else feature_extractor
    if image_processor is None:
        raise ValueError("You need to specify an `image_processor`.")
    if tokenizer is None:
        raise ValueError("You need to specify a `tokenizer`.")
    super().__init__(image_processor , tokenizer)
def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
    # verify input
    if self.image_processor.apply_ocr and (boxes is not None):
        raise ValueError(
            "You cannot provide bounding boxes "
            "if you initialized the image processor with apply_ocr set to True.")
    if self.image_processor.apply_ocr and (word_labels is not None):
        raise ValueError(
            "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
    if return_overflowing_tokens is True and return_offsets_mapping is False:
        raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
    # first, apply the image processor
    features = self.image_processor(images=images , return_tensors=return_tensors)
    # second, apply the tokenizer
    if text is not None and self.image_processor.apply_ocr and text_pair is None:
        if isinstance(text , str):
            text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
        text_pair = features["words"]
    encoded_inputs = self.tokenizer(
        text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
    # add pixel values
    images = features.pop("pixel_values")
    if return_overflowing_tokens is True:
        images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"])
    encoded_inputs["image"] = images
    return encoded_inputs
def get_overflowing_images(self, images, overflow_to_sample_mapping):
    # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
    images_with_overflow = []
    for sample_idx in overflow_to_sample_mapping:
        images_with_overflow.append(images[sample_idx])
    if len(images_with_overflow) != len(overflow_to_sample_mapping):
        raise ValueError(
            "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
            f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
    return images_with_overflow
def batch_decode(self, *args, **kwargs):
    return self.tokenizer.batch_decode(*args , **kwargs)
def decode(self, *args, **kwargs):
    return self.tokenizer.decode(*args , **kwargs)
@property
def model_input_names(self):
    return ["input_ids", "bbox", "attention_mask", "image"]
@property
def feature_extractor_class(self):
    warnings.warn(
        "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
    return self.image_processor_class
@property
def feature_extractor(self):
    warnings.warn(
        "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
    return self.image_processor
| 453 | 0 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2  # Nicomachus: the sum of cubes equals the square of the sum
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
| 391 |
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits) for x in data]
def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma , ndigits) for x in data]
| 597 | 0 |
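The same rescalings vectorize naturally; a sketch with NumPy (an extra dependency — the snippet above needs only the standard library; `ddof=1` matches `statistics.stdev`):

import numpy as np

data = np.array([2.0, 45.0, 85.0, 100.0])
normalized = (data - data.min()) / (data.max() - data.min())
standardized = (data - data.mean()) / data.std(ddof=1)
print(normalized.round(3))  # [0.    0.439 0.847 1.   ]
print(standardized.round(3))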
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : List[Any] = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class _snake_case ( A__ ):
_lowercase : Optional[int] = '''segformer'''
def __init__( self , a=3 , a=4 , a=[2, 2, 2, 2] , a=[8, 4, 2, 1] , a=[32, 64, 160, 256] , a=[7, 3, 3, 3] , a=[4, 2, 2, 2] , a=[1, 2, 5, 8] , a=[4, 4, 4, 4] , a="gelu" , a=0.0 , a=0.0 , a=0.1 , a=0.02 , a=0.1 , a=1E-6 , a=256 , a=255 , **a , ) -> Tuple:
super().__init__(**a)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , a , )
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = num_encoder_blocks
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = sr_ratios
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = strides
SCREAMING_SNAKE_CASE = mlp_ratios
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = classifier_dropout_prob
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = decoder_hidden_size
SCREAMING_SNAKE_CASE = kwargs.get('reshape_last_stage' , a)
SCREAMING_SNAKE_CASE = semantic_loss_ignore_index
class _snake_case ( A__ ):
_lowercase : Tuple = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def SCREAMING_SNAKE_CASE__ ( self) -> float:
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self) -> int:
return 12
| 444 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds , labels):
    return float((preds == labels).mean())
def acc_and_f1(preds , labels):
    acc = simple_accuracy(preds , labels)
    f1 = float(f1_score(y_true=labels , y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs , in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs , axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs , axis=0)
    sim = cdist(in_sentvecs , en_sentvecs , 'cosine')
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None] , axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
'references': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
}) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , )
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> List[Any]:
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(a , a)}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(a , a)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(a , a)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
| 444 | 1 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
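# Illustrative usage (added sketch; the checkpoint name is an example, not taken
# from this file):
#
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(
#         text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt"
#     )
#     # `inputs` now holds padded `input_ids`, `attention_mask` and `pixel_values`.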
| 673 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
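# Note (added): `debug_launcher` emulates a distributed run with CPU processes in
# the current interpreter, while the multi-GPU test shells out to `torchrun` so
# each device gets its own process; `omp_num_threads=1` avoids CPU oversubscription.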
| 539 | 0 |
"""simple docstring"""
def merge_sort(collection: list) -> list:
    """Repeatedly move the minimum to the front and the maximum to the back."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
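# Illustrative usage (added): despite the historical name, this is a selection-style
# sort running in O(n^2); each pass scans the remainder twice (once for min, once for max).
assert merge_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert merge_sort([]) == []
assert merge_sort([-2, -5, -45]) == [-45, -5, -2]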
if __name__ == "__main__":
UpperCamelCase : List[str] = input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase : Optional[Any] = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
| 701 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


# The `text_file`, `zip_csv_with_dir_path` and `zip_image_path` parameters below are
# data-file fixtures assumed to be defined elsewhere in the test suite.
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
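# Example (added sketch): how a test can consume the fixtures above; the test
# name and repo id are illustrative, not part of the original file.
#
#     def test_create_and_cleanup(temporary_repo, hf_api, hf_token):
#         with temporary_repo(f"{CI_HUB_USER}/tmp-{int(time.time() * 10e3)}") as repo_id:
#             hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
#         # the repo is deleted automatically when the `with` block exits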
| 293 | 0 |
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('doctest').testmod()
| 693 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
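# Note (added): with the `_LazyModule` registration above, submodules such as
# `modeling_vivit` are only imported on first attribute access (e.g. the first
# `from transformers.models.vivit import VivitModel`), which keeps the initial
# `import transformers` cheap and makes torch/vision soft dependencies.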
| 250 | 0 |
"""simple docstring"""
def sum_of_proper_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of input_num."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
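# Illustrative usage (added): a number is perfect exactly when it equals the sum
# of its proper divisors.
assert sum_of_proper_divisors(6) == 6    # 1 + 2 + 3
assert sum_of_proper_divisors(28) == 28  # 1 + 2 + 4 + 7 + 14
assert sum_of_proper_divisors(12) == 16  # 1 + 2 + 3 + 4 + 6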
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 |
"""simple docstring"""
from collections.abc import Callable
class Heap:
    """A generic heap; `key` maps an item's value to its priority."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores [item, priority] pairs.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns index of valid parent as per desired ordering among index i and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction of the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction of the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value) -> None:
        """Updates the value of the given item in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top [item, value] pair from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns the top [item, value] pair from the heap and removes it as well, if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
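# Illustrative usage (added): with the identity key, larger values rise to the top
# (`_heapify_up` swaps while the child compares >= its parent), so `get_top`
# returns the largest [item, value] pair; negate the key for min-heap behaviour.
def _demo_heap() -> None:
    h = Heap()
    h.insert_item(5, 34)
    h.insert_item(6, 31)
    h.insert_item(7, 37)
    assert h.get_top() == [7, 37]
    assert h.extract_top() == [7, 37]
    h.update_item(6, 40)
    assert h.get_top() == [6, 40]
    h.delete_item(6)
    assert h.get_top() == [5, 34]


_demo_heap()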
def test_heap() -> None:
    """Exercise the Heap class (doctests omitted in this snippet)."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 16 | 1 |
'''simple docstring'''
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
| 90 |
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
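# Note (added): the greedy choice above assumes `finish` is already sorted in
# non-decreasing order; for arbitrary input, sort the (start, finish) pairs by
# finish time first. For the sample data the selected activities are 0, 1, 3, 4.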
| 255 | 0 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all character n-grams of size ngram_size from sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
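# Illustrative usage (added): these are character n-grams, not word n-grams.
assert create_ngram("abcde", 3) == ["abc", "bcd", "cde"]
assert create_ngram("I am", 2) == ["I ", " a", "am"]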
if __name__ == "__main__":
from doctest import testmod
testmod()
| 702 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling uniform points in the square [-1, 1] x [-1, 1]."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The known value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of function_to_integrate on [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
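# Note (added): Monte Carlo error shrinks like O(1/sqrt(iterations)), so each
# extra decimal digit of accuracy costs roughly 100x more samples. A quick demo:
#
#     pi_estimator(100_000)                        # usually within ~0.01 of pi
#     area_under_line_estimator_check(100_000)     # integral of y=x on [0, 1] -> 0.5
#     pi_estimator_using_area_under_curve(100_000)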
if __name__ == "__main__":
import doctest
doctest.testmod()
| 451 | 0 |
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    """Partition a[left_index:right_index] around the pivot at left_index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
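# Note (added): randomizing the pivot makes the expected running time O(n log n)
# on every input, since no fixed adversarial ordering can force the worst case;
# the O(n^2) worst case remains possible but occurs only with vanishing probability.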
if __name__ == "__main__":
main()
| 0 |
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
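# Illustrative usage (added sketch; the checkpoint name is an example, not taken
# from this file):
#
#     from transformers import Wav2Vec2Processor
#
#     processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#     inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#     labels = processor(text="HELLO WORLD", return_tensors="pt").input_ids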
| 423 | 0 |
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class MaMaaaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, encoder_layerdrop=0.0,
                 decoder_layerdrop=0.0, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
def a__ ( self: List[Any] )-> Tuple:
lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : List[str] = self.eos_token_id # Eos Token
lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCamelCase : List[str] = input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase : str = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase : List[str] = self.get_config()
lowerCamelCase : Tuple = prepare_mam_aaa_inputs_dict(__a , __a , __a )
return config, inputs_dict
def a__ ( self: str )-> int:
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def a__ ( self: List[str] )-> Optional[Any]:
lowerCamelCase : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ ( self: Any , __a: Optional[int] , __a: List[Any] )-> List[str]:
lowerCamelCase : Dict = MaMaaaModel(config=__a ).get_decoder().to(__a ).eval()
lowerCamelCase : Any = inputs_dict["""input_ids"""]
lowerCamelCase : Union[str, Any] = inputs_dict["""attention_mask"""]
lowerCamelCase : Dict = inputs_dict["""head_mask"""]
# first forward pass
lowerCamelCase : Dict = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a )
lowerCamelCase : Dict = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase : Tuple = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
lowerCamelCase : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase : Optional[int] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCamelCase : Tuple = model(__a , attention_mask=__a )["""last_hidden_state"""]
lowerCamelCase : Optional[Any] = model(__a , attention_mask=__a , past_key_values=__a )[
"""last_hidden_state"""
]
# select random slice
lowerCamelCase : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-2 ) )
def a__ ( self: Union[str, Any] , __a: str , __a: Dict )-> Any:
lowerCamelCase : Union[str, Any] = MaMaaaModel(config=__a ).to(__a ).eval()
lowerCamelCase : Any = model(**__a )
lowerCamelCase : Any = outputs.encoder_last_hidden_state
lowerCamelCase : Dict = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : List[Any] = model.get_encoder()
encoder.save_pretrained(__a )
lowerCamelCase : Union[str, Any] = MaMaaaEncoder.from_pretrained(__a ).to(__a )
lowerCamelCase : List[Any] = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : Dict = model.get_decoder()
decoder.save_pretrained(__a )
lowerCamelCase : Dict = MaMaaaDecoder.from_pretrained(__a ).to(__a )
lowerCamelCase : int = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=__a , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
def a__ ( self: Tuple , __a: Dict , __a: List[Any] , __a: Union[str, Any] , __a: Any , __a: Optional[Any] )-> Dict:
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def a__ ( self: Any )-> List[str]:
lowerCamelCase : Any = MaMaaaModelTester(self )
lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=__a )
def a__ ( self: str )-> str:
self.config_tester.run_common_tests()
def a__ ( self: Optional[int] )-> Any:
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCamelCase : Optional[Any] = model_class(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
lowerCamelCase : str = model_class.from_pretrained(__a , output_loading_info=__a )
self.assertEqual(info["""missing_keys"""] , [] )
def a__ ( self: Dict )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__a )
def a__ ( self: Union[str, Any] )-> str:
lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__a )
def a__ ( self: Tuple )-> Any:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
lowerCamelCase : Tuple = model_class(__a )
model.to(__a )
model.eval()
lowerCamelCase : Tuple = copy.deepcopy(self._prepare_for_class(__a , __a ) )
if not self.is_encoder_decoder:
lowerCamelCase : str = inputs["""input_ids"""]
del inputs["input_ids"]
else:
lowerCamelCase : List[Any] = inputs["""input_ids"""]
lowerCamelCase : int = inputs.get("""decoder_input_ids""" , __a )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , __a )
lowerCamelCase : Dict = model.get_input_embeddings()
if not self.is_encoder_decoder:
lowerCamelCase : Any = wte(__a )
else:
lowerCamelCase : Optional[int] = wte(__a )
lowerCamelCase : Tuple = wte(__a )
with torch.no_grad():
model(**__a )[0]
def a__ ( self: List[str] )-> List[str]:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
lowerCamelCase : List[str] = input_dict["""input_ids"""]
lowerCamelCase : int = input_ids.ne(1 ).to(__a )
lowerCamelCase : Any = MaMaaaForConditionalGeneration(__a ).eval().to(__a )
if torch_device == "cuda":
model.half()
model.generate(__a , attention_mask=__a )
model.generate(num_beams=4 , do_sample=__a , early_stopping=__a , num_return_sequences=3 )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Any )-> str:
return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
def a__ ( self: Optional[Any] )-> int:
lowerCamelCase : Dict = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(__a )
lowerCamelCase : List[Any] = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
lowerCamelCase : int = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
lowerCamelCase : Any = prepare_mam_aaa_inputs_dict(model.config , __a , __a )
with torch.no_grad():
lowerCamelCase : Optional[Any] = model(**__a )[0]
lowerCamelCase : List[Any] = torch.Size((1, 11, 1_024) )
self.assertEqual(output.shape , __a )
# change to expected output here
lowerCamelCase : int = torch.tensor(
[[-0.77_80, -0.16_76, 0.10_38], [-6.75_56, -1.39_92, 0.05_67], [-7.53_83, -0.59_20, -0.27_79]] , device=__a )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=__a ) )
def a__ ( self: Optional[Any] )-> Optional[Any]:
lowerCamelCase : Optional[Any] = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__a )
# change to intended input
lowerCamelCase : Union[str, Any] = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
lowerCamelCase : Optional[Any] = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
lowerCamelCase : Any = prepare_mam_aaa_inputs_dict(model.config , __a , __a )
with torch.no_grad():
lowerCamelCase : str = model(**__a )[0]
lowerCamelCase : Dict = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , __a )
# change to expected output here
lowerCamelCase : str = torch.tensor(
[[-1.04_48, -1.04_11, 3.79_92], [-3.21_91, -3.23_86, -1.34_51], [-3.62_10, -3.59_93, 0.49_25]] , device=__a )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=__a ) )
def a__ ( self: str )-> Optional[int]:
lowerCamelCase : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__a )
lowerCamelCase : Any = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
lowerCamelCase : List[str] = [
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
""" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
""" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
lowerCamelCase : int = tokenizer(__a , padding=__a , return_tensors="""pt""" )
lowerCamelCase : str = model.generate(
input_ids=dct["""input_ids"""].to(__a ) , attention_mask=dct["""attention_mask"""].to(__a ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
lowerCamelCase : Tuple = [
"""The NSA case highlights the total absence of intelligence debate""",
"""I think there are two levels of response from the French government.""",
"""When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
""" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
""" communications in France.""",
]
lowerCamelCase : Optional[int] = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__a , skip_special_tokens=__a )
assert generated == expected_en
| 702 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
                 hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0,
                 qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
                 hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02,
                 layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10,
                 encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : str = None
if self.use_labels:
lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def a__ ( self: List[Any] )-> Optional[int]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Tuple = model(__a )
lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int:
lowerCamelCase : List[Any] = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase : Dict = None
lowerCamelCase : Dict = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase : List[str] = 1
lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a )
model.to(__a )
model.eval()
lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str:
lowerCamelCase : Optional[Any] = self.type_sequence_label_size
lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase : int = 1
lowerCamelCase : List[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self: int )-> Optional[int]:
lowerCamelCase : str = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs
lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def a__ ( self: Union[str, Any] )-> Optional[int]:
lowerCamelCase : List[str] = FocalNetModelTester(self )
lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a )
def a__ ( self: List[str] )-> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: List[str] )-> Union[str, Any]:
return
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[Any] )-> Dict:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: List[Any] )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def a__ ( self: Optional[Any] )-> str:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def a__ ( self: Optional[Any] )-> Dict:
pass
def a__ ( self: Optional[Any] )-> Dict:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : Any = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def a__ ( self: Tuple )-> Optional[int]:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : int = model_class(__a )
lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Any = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]:
lowerCamelCase : List[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : List[str] = outputs.hidden_states
lowerCamelCase : Tuple = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# FocalNet has a different seq_length
lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape
lowerCamelCase : Tuple = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self: Any )-> Any:
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : List[str] = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__a , __a , __a , __a )
def a__ ( self: str )-> Union[str, Any]:
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : List[str] = 3
lowerCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : str = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Union[str, Any] = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
@slow
def a__ ( self: Optional[int] )-> List[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
    def test_initialization( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
    @cached_property
    def default_image_processor( self ):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ):
        model = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
    all_model_classes =(FocalNetBackbone,) if is_torch_available() else ()
    config_class =FocalNetConfig
    has_attentions =False
    def setUp( self ):
        self.model_tester = FocalNetModelTester(self )
| 42 | 0 |
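Editor's illustrative sketch (not part of the dataset row above): the FocalNet hidden-states test checks that the token dimension equals the number of non-overlapping patches; the helper name `num_patches` below is assumed, not taken from the row.

def num_patches(image_size, patch_size):
    # Accept either an int or an (H, W) / (Ph, Pw) pair, as the test above does.
    h, w = (image_size, image_size) if isinstance(image_size, int) else image_size
    ph, pw = (patch_size, patch_size) if isinstance(patch_size, int) else patch_size
    return (h // ph) * (w // pw)

assert num_patches(224, 16) == 196  # a 224x224 image with 16x16 patches gives 14 * 14 tokens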
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , q_groups=2 , k_groups=2 , v_groups=2 , post_attention_groups=2 , intermediate_groups=4 , output_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_squeezebert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_squeezebert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_squeezebert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_squeezebert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_squeezebert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        ((config), (input_ids), (input_mask), (sequence_labels), (token_labels), (choice_labels)) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp( self ):
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SqueezeBertConfig , dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_squeezebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_inference_classification_head( self ):
        model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
        input_ids = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape , expected_shape )
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]] )
        self.assertTrue(torch.allclose(output , expected_tensor , atol=1E-4 ) )
| 27 |
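Editor's illustrative sketch (not part of the dataset row above): the SqueezeBert tester builds random token ids and a 0/1 attention mask via `ids_tensor`/`random_attention_mask`; a minimal, assumed-name equivalent with plain torch calls:

import torch

def make_dummy_batch(batch_size=13, seq_length=7, vocab_size=99):
    # Token ids drawn uniformly from [0, vocab_size), mask entries from {0, 1}.
    input_ids = torch.randint(0, vocab_size, (batch_size, seq_length))
    attention_mask = torch.randint(0, 2, (batch_size, seq_length))
    return input_ids, attention_mask

ids, mask = make_dummy_batch()
assert ids.shape == (13, 7) and int(mask.max()) <= 1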
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
class GLPNFeatureExtractor( GLPNImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 538 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
A = logging.get_logger(__name__)
class ImageGPTFeatureExtractor( ImageGPTImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 109 |
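Editor's illustrative sketch (not part of the dataset rows above): the GLPN and ImageGPT rows follow the same deprecation-shim pattern; a dependency-free version with made-up class names:

import warnings

class NewProcessor:
    def __init__(self, size=32):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    # The shim keeps old imports working: it warns once, then defers to the new class.
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)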
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def get_mobilenet_va_config( model_name ):
    config = MobileNetVaConfig(layer_norm_eps=0.0_01 )
    if "_quant" in model_name:
        raise ValueError('Quantized models are not supported.' )
    matches = re.match(R'^mobilenet_v1_([^_]*)_([^_]*)$' , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = 'imagenet-1k-id2label.json'
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ) + 1: v for k, v in id2label.items()}
    id2label[0] = 'background'
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_mobilenet_va_config(model_name )
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={'width': config.image_size, 'height': config.image_size} , size={'shortest_edge': config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing to the hub...' )
        repo_id = 'google/' + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 109 | 1 |
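Editor's illustrative sketch (not part of the dataset row above): the MobileNetV1 conversion script shifts every ImageNet label up by one to make room for the TF checkpoint's extra "background" class at index 0; in isolation:

imagenet_id2label = {0: "tench", 1: "goldfish"}  # illustrative two-entry subset
id2label = {k + 1: v for k, v in imagenet_id2label.items()}
id2label[0] = "background"
label2id = {v: k for k, v in id2label.items()}
assert id2label[1] == "tench" and label2id["background"] == 0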
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig( PretrainedConfig ):
    model_type = """gpt_neo"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
    def __init__( self , vocab_size=5_0_2_5_7 , max_position_embeddings=2_0_4_8 , hidden_size=2_0_4_8 , num_layers=2_4 , attention_types=[[["global", "local"], 1_2]] , num_heads=1_6 , intermediate_size=None , window_size=2_5_6 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , **kwargs , ):
"""simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
F'`config.num_layers = {self.num_layers}`. '
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @staticmethod
    def expand_attention_types_params( attention_types ):
        """simple docstring"""
        attentions = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def custom_unfold( input , dimension , size , step ):
    """simple docstring"""
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode="""floor""" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks( seq_length , window_size ):
    """simple docstring"""
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode="""floor""" )
class GPTNeoOnnxConfig( OnnxConfigWithPast ):
@property
    def inputs( self ):
        """simple docstring"""
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
@property
    def num_attention_heads( self ):
"""simple docstring"""
return self._config.num_heads
    def generate_dummy_inputs( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
                batch , seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset( self ):
"""simple docstring"""
return 1_3
| 386 |
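Editor's illustrative sketch (not part of the dataset row above): the GPT-Neo config expands a compact `attention_types` spec such as `[[["global", "local"], 12]]` into a per-layer list; the same logic in a standalone helper with an assumed name:

def expand_attention_types(attention_types):
    layers = []
    for pattern, repeats in attention_types:
        # Each repeat contributes one copy of the pattern, e.g. ["global", "local"].
        for _ in range(repeats):
            layers.extend(pattern)
    return layers

assert expand_attention_types([[["global", "local"], 12]]) == ["global", "local"] * 12  # 24 layers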
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__snake_case = logging.getLogger(__name__)
def _lowercase ( SCREAMING_SNAKE_CASE_ : torch.nn.Module , SCREAMING_SNAKE_CASE_ : BnbQuantizationConfig , SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] = None , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, Union[int, str, torch.device]]] = None , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE_ : Optional[Dict[Union[int, str], Union[int, str]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE_ : bool = False , ):
"""simple docstring"""
UpperCamelCase = bnb_quantization_config.load_in_abit
UpperCamelCase = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
UpperCamelCase = []
# custom device map
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(device_map.keys() ) > 1:
UpperCamelCase = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
UpperCamelCase = get_keys_to_not_convert(SCREAMING_SNAKE_CASE_ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
UpperCamelCase = []
UpperCamelCase = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(SCREAMING_SNAKE_CASE_ )
# compatibility with peft
UpperCamelCase = load_in_abit
UpperCamelCase = load_in_abit
UpperCamelCase = get_parameter_device(SCREAMING_SNAKE_CASE_ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
UpperCamelCase = replace_with_bnb_layers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , modules_to_not_convert=SCREAMING_SNAKE_CASE_ )
# convert param to the right dtype
UpperCamelCase = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
UpperCamelCase = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
UpperCamelCase = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(SCREAMING_SNAKE_CASE_ ):
param.to(SCREAMING_SNAKE_CASE_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'The model device type is {model_device.type}. However, cuda is needed for quantization.'
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
with init_empty_weights():
UpperCamelCase = replace_with_bnb_layers(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , modules_to_not_convert=SCREAMING_SNAKE_CASE_ )
UpperCamelCase = get_quantized_model_device_map(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , max_memory=SCREAMING_SNAKE_CASE_ , no_split_module_classes=SCREAMING_SNAKE_CASE_ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
UpperCamelCase = True
UpperCamelCase = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=SCREAMING_SNAKE_CASE_ , offload_state_dict=SCREAMING_SNAKE_CASE_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(SCREAMING_SNAKE_CASE_ , device_map=SCREAMING_SNAKE_CASE_ , offload_dir=SCREAMING_SNAKE_CASE_ )
def _lowercase ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
UpperCamelCase = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
UpperCamelCase = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
UpperCamelCase = {}
UpperCamelCase = special_dtypes
UpperCamelCase = no_split_module_classes
UpperCamelCase = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
UpperCamelCase = get_balanced_memory(
SCREAMING_SNAKE_CASE_ , low_zero=(device_map == """balanced_low_0""") , max_memory=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
UpperCamelCase = max_memory
UpperCamelCase = infer_auto_device_map(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
# check if don't have any quantized module on the cpu
UpperCamelCase = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
UpperCamelCase = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def _lowercase ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None ):
"""simple docstring"""
if modules_to_not_convert is None:
UpperCamelCase = []
UpperCamelCase , UpperCamelCase = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _lowercase ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Dict=None , ):
"""simple docstring"""
UpperCamelCase = False
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase = []
current_key_name.append(SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
UpperCamelCase = """.""".join(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
UpperCamelCase = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
UpperCamelCase = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=SCREAMING_SNAKE_CASE_ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
UpperCamelCase = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
UpperCamelCase = module.weight.data
if module.bias is not None:
UpperCamelCase = module.bias.data
bnb_module.requires_grad_(SCREAMING_SNAKE_CASE_ )
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase = True
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _lowercase ( SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
with init_empty_weights():
UpperCamelCase = deepcopy(SCREAMING_SNAKE_CASE_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
UpperCamelCase = find_tied_parameters(SCREAMING_SNAKE_CASE_ )
# For compatibility with Accelerate < 0.18
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase = sum(SCREAMING_SNAKE_CASE_ , [] )
UpperCamelCase = len(SCREAMING_SNAKE_CASE_ ) > 0
# Check if it is a base model
UpperCamelCase = False
if hasattr(SCREAMING_SNAKE_CASE_ , """base_model_prefix""" ):
UpperCamelCase = not hasattr(SCREAMING_SNAKE_CASE_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase = list(model.named_children() )
UpperCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase = set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = list(set(SCREAMING_SNAKE_CASE_ ) ) + list(SCREAMING_SNAKE_CASE_ )
# remove ".weight" from the keys
UpperCamelCase = [""".weight""", """.bias"""]
UpperCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase = name.replace(SCREAMING_SNAKE_CASE_ , """""" )
filtered_module_names.append(SCREAMING_SNAKE_CASE_ )
return filtered_module_names
def _lowercase ( SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
for m in model.modules():
if isinstance(SCREAMING_SNAKE_CASE_ , bnb.nn.Linearabit ):
return True
return False
def _lowercase ( SCREAMING_SNAKE_CASE_ : nn.Module ):
"""simple docstring"""
return next(parameter.parameters() ).device
def _lowercase ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 0 , dtype=SCREAMING_SNAKE_CASE_ , value=SCREAMING_SNAKE_CASE_ )
UpperCamelCase = param_name
UpperCamelCase = model
if "." in tensor_name:
UpperCamelCase = tensor_name.split(""".""" )
for split in splits[:-1]:
UpperCamelCase = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
UpperCamelCase = new_module
UpperCamelCase = splits[-1]
# offload weights
UpperCamelCase = False
offload_weight(module._parameters[tensor_name] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ , )
else:
offload_weight(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ )
offload_weight(SCREAMING_SNAKE_CASE_ , param_name.replace("""weight""" , """SCB""" ) , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ )
set_module_tensor_to_device(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """meta""" , dtype=SCREAMING_SNAKE_CASE_ , value=torch.empty(*param.size() ) )
| 386 | 1 |
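Editor's illustrative sketch (not part of the dataset row above): the `_replace_with_bnb_layers` helper recurses over `named_children` and swaps eligible `nn.Linear` modules in place; a runnable stand-in that uses `nn.Identity` instead of a bitsandbytes layer, so it needs no GPU:

import torch.nn as nn

def replace_linears(model, modules_to_not_convert=()):
    for name, child in model.named_children():
        if isinstance(child, nn.Linear) and name not in modules_to_not_convert:
            # A real implementation would construct a quantized Linear here.
            setattr(model, name, nn.Identity())
        else:
            replace_linears(child, modules_to_not_convert)
    return model

net = replace_linears(nn.Sequential(nn.Linear(4, 4), nn.ReLU()))
assert isinstance(net[0], nn.Identity)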
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("""test""" )
    else:
        parser = argparse.ArgumentParser("""Accelerate test command""" )
    parser.add_argument(
        """--config_file""" , default=None , help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command( args ):
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F'''--config_file={args.config_file} {script_name}'''
    cmd = ["""accelerate-launch"""] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print("""Test is a success! You are ready for your distributed training!""" )
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
| 715 | '''simple docstring'''
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
SCREAMING_SNAKE_CASE_ = get_logger(__name__)
SCREAMING_SNAKE_CASE_ = R'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class a :
"""simple docstring"""
@add_start_docstrings(snake_case_ )
def __call__( self , snake_case_ , snake_case_ ):
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class a :
"""simple docstring"""
@add_start_docstrings(snake_case_ )
def __call__( self , snake_case_ , snake_case_ ):
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class a ( __lowerCAmelCase ):
"""simple docstring"""
@add_start_docstrings(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
'''simple docstring'''
for processor in self:
__UpperCAmelCase: str = inspect.signature(processor.__call__ ).parameters
if len(snake_case_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
__UpperCAmelCase: List[Any] = processor(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
else:
__UpperCAmelCase: Optional[Any] = processor(snake_case_ , snake_case_ , snake_case_ )
return scores
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case_ ):
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
__UpperCAmelCase: Any = temperature
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Tuple = scores / self.temperature
return scores
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ = -float("""Inf""" ) , snake_case_ = 1 ):
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(snake_case_ , snake_case_ ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
__UpperCAmelCase: int = top_p
__UpperCAmelCase: str = filter_value
__UpperCAmelCase: List[Any] = min_tokens_to_keep
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase, __UpperCAmelCase: List[Any] = lax.top_k(snake_case_ , scores.shape[-1] )
__UpperCAmelCase: Tuple = jnp.full_like(snake_case_ , self.filter_value )
__UpperCAmelCase: Optional[int] = jax.nn.softmax(snake_case_ , axis=-1 ).cumsum(axis=-1 )
__UpperCAmelCase: List[str] = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
__UpperCAmelCase: str = jnp.roll(snake_case_ , 1 )
score_mask |= score_mask.at[:, 0].set(snake_case_ )
# min tokens to keep
__UpperCAmelCase: str = score_mask.at[:, : self.min_tokens_to_keep].set(snake_case_ )
__UpperCAmelCase: List[Any] = jnp.where(snake_case_ , snake_case_ , snake_case_ )
__UpperCAmelCase: Optional[Any] = jax.lax.sort_key_val(snake_case_ , snake_case_ )[-1]
return next_scores
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ = -float("""Inf""" ) , snake_case_ = 1 ):
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
__UpperCAmelCase: Optional[Any] = max(snake_case_ , snake_case_ )
__UpperCAmelCase: List[str] = filter_value
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase, __UpperCAmelCase: List[Any] = scores.shape
__UpperCAmelCase: List[Any] = jnp.full(batch_size * vocab_size , self.filter_value )
__UpperCAmelCase: Any = min(self.top_k , scores.shape[-1] ) # Safety check
__UpperCAmelCase, __UpperCAmelCase: List[Any] = lax.top_k(snake_case_ , snake_case_ )
__UpperCAmelCase: Optional[Any] = jnp.broadcast_to((jnp.arange(snake_case_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
__UpperCAmelCase: List[Any] = topk_scores.flatten()
__UpperCAmelCase: str = topk_indices.flatten() + shift
__UpperCAmelCase: List[Any] = next_scores_flat.at[topk_indices_flat].set(snake_case_ )
__UpperCAmelCase: Any = next_scores_flat.reshape(snake_case_ , snake_case_ )
return next_scores
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: List[Any] = bos_token_id
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: List[Any] = jnp.full(scores.shape , -float("""inf""" ) )
__UpperCAmelCase: Optional[Any] = 1 - jnp.bool_(cur_len - 1 )
__UpperCAmelCase: Any = jnp.where(snake_case_ , new_scores.at[:, self.bos_token_id].set(0 ) , snake_case_ )
return scores
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = max_length
__UpperCAmelCase: List[str] = eos_token_id
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Optional[int] = jnp.full(scores.shape , -float("""inf""" ) )
__UpperCAmelCase: List[str] = 1 - jnp.bool_(cur_len - self.max_length + 1 )
__UpperCAmelCase: Any = jnp.where(snake_case_ , new_scores.at[:, self.eos_token_id].set(0 ) , snake_case_ )
return scores
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ ):
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(snake_case_ , snake_case_ ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
__UpperCAmelCase: List[Any] = min_length
__UpperCAmelCase: Optional[int] = eos_token_id
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
__UpperCAmelCase: int = jnp.where(snake_case_ , scores.at[:, self.eos_token_id].set(-float("""inf""" ) ) , snake_case_ )
return scores
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Any = list(snake_case_ )
__UpperCAmelCase: Optional[Any] = begin_index
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = 1 - jnp.bool_(cur_len - self.begin_index )
__UpperCAmelCase: str = jnp.where(snake_case_ , scores.at[:, self.begin_suppress_tokens].set(-float("""inf""" ) ) , snake_case_ )
return scores
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = list(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: int = scores.at[..., self.suppress_tokens].set(-float("""inf""" ) )
return scores
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Optional[int] = dict(snake_case_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
__UpperCAmelCase: Optional[Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
__UpperCAmelCase: Union[str, Any] = force_token_array.at[index].set(snake_case_ )
__UpperCAmelCase: Union[str, Any] = jnp.intaa(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
def _force_token(snake_case_ ):
__UpperCAmelCase: List[Any] = scores.shape[0]
__UpperCAmelCase: int = self.force_token_array[generation_idx]
__UpperCAmelCase: Union[str, Any] = jnp.ones_like(snake_case_ , dtype=scores.dtype ) * -float("""inf""" )
__UpperCAmelCase: List[Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
__UpperCAmelCase: List[Any] = lax.dynamic_update_slice(snake_case_ , snake_case_ , (0, current_token) )
return new_scores
__UpperCAmelCase: Optional[int] = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(snake_case_ ) , lambda: scores , ) , )
return scores
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: int = generate_config.eos_token_id
__UpperCAmelCase: Union[str, Any] = generate_config.no_timestamps_token_id
__UpperCAmelCase: Tuple = generate_config.no_timestamps_token_id + 1
__UpperCAmelCase: Any = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(snake_case_ , """max_initial_timestamp_index""" ):
__UpperCAmelCase: Optional[int] = generate_config.max_initial_timestamp_index
else:
__UpperCAmelCase: List[str] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
__UpperCAmelCase: Any = model_config.vocab_size
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Any = scores.at[:, self.no_timestamps_token_id].set(-float("""inf""" ) )
def handle_pairs(snake_case_ , snake_case_ ):
__UpperCAmelCase: Optional[int] = jnp.where((cur_len - self.begin_index) >= 1 , snake_case_ , snake_case_ )
__UpperCAmelCase: Union[str, Any] = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , snake_case_ , )
__UpperCAmelCase: Union[str, Any] = jnp.where((cur_len - self.begin_index) < 2 , snake_case_ , snake_case_ )
__UpperCAmelCase: List[str] = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , snake_case_ , snake_case_ , )
return jnp.where(
snake_case_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("""inf""" ) ) , scores_k.at[: self.eos_token_id].set(-float("""inf""" ) ) , ) , snake_case_ , )
__UpperCAmelCase: Dict = jax.vmap(snake_case_ )(snake_case_ , snake_case_ )
__UpperCAmelCase: Dict = jnp.where(cur_len == self.begin_index , snake_case_ , snake_case_ )
__UpperCAmelCase: List[Any] = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , snake_case_ , )
__UpperCAmelCase: List[str] = self.timestamp_begin + self.max_initial_timestamp_index
__UpperCAmelCase: List[str] = jnp.where(
snake_case_ , scores.at[:, last_allowed + 1 :].set(-float("""inf""" ) ) , snake_case_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
__UpperCAmelCase: Union[str, Any] = jax.nn.log_softmax(snake_case_ , axis=-1 )
def handle_cumulative_probs(snake_case_ , snake_case_ ):
__UpperCAmelCase: int = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
__UpperCAmelCase: Tuple = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("""inf""" ) ) , snake_case_ , )
__UpperCAmelCase: Tuple = jax.vmap(snake_case_ )(snake_case_ , snake_case_ )
return scores | 466 | 0 |
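Editor's illustrative sketch (not part of the dataset row above): the row's temperature and top-k logits warpers, restated in NumPy so they run anywhere; the real classes use `jax.lax.top_k` and scatter-style index updates rather than this mask-based form:

import numpy as np

def warp_logits(scores, temperature=1.0, top_k=2, filter_value=-np.inf):
    scores = scores / temperature
    # k-th largest value per row; everything strictly below it is filtered out.
    kth = np.sort(scores, axis=-1)[..., -top_k][..., None]
    return np.where(scores < kth, filter_value, scores)

out = warp_logits(np.array([[1.0, 3.0, 2.0, 0.5]]), top_k=2)
assert np.isinf(out[0, 0]) and out[0, 1] == 3.0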
"""simple docstring"""
from math import isclose, sqrt
def next_point( point_x , point_y , incoming_gradient ):
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution( first_x_coord = 1.4 , first_y_coord = -9.6 ):
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x , point_y , gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(f"{solution() = }")
| 29 |
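Editor's illustrative sketch (not part of the dataset row above): the reflection routine picks the "other" root of a quadratic, since both intersections of the ray with the ellipse satisfy A x^2 + B x + C = 0 and `math.isclose` discards the one already known:

from math import isclose, sqrt

def other_root(a, b, c, known_x):
    # Both quadratic-formula roots; return whichever is not the known intersection.
    x1 = (-b - sqrt(b * b - 4 * a * c)) / (2 * a)
    x2 = (-b + sqrt(b * b - 4 * a * c)) / (2 * a)
    return x2 if isclose(x1, known_x) else x1

assert other_root(1, -3, 2, 1.0) == 2.0  # roots of x^2 - 3x + 2 are 1 and 2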
from maths.prime_check import is_prime
def twin_prime( number : int ):
    if not isinstance(number , int ):
        msg : str = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 521 | 0 |
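Editor's illustrative sketch (not part of the dataset row above): the row imports `is_prime` from a maths package that is not in this dump; a self-contained trial-division version plus the same twin-prime check:

def is_prime(n: int) -> bool:
    if n < 2:
        return False
    for d in range(2, int(n ** 0.5) + 1):
        if n % d == 0:
            return False
    return True

def twin_prime(number: int) -> int:
    # Returns number + 2 when (number, number + 2) is a twin-prime pair, else -1.
    return number + 2 if is_prime(number) and is_prime(number + 2) else -1

assert twin_prime(3) == 5 and twin_prime(4) == -1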
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation :
UpperCAmelCase = 42
UpperCAmelCase = None
# Automatically constructed
UpperCAmelCase = """dict"""
UpperCAmelCase = None
UpperCAmelCase = field(default="""Translation""" , init=lowerCamelCase__ , repr=lowerCamelCase__ )
def __call__( self : str ) -> List[Any]:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def _UpperCamelCase ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value("string" ) for k in sorted(self.languages )}
@dataclass
class TranslationVariableLanguages :
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
# Automatically constructed
UpperCAmelCase = """dict"""
UpperCAmelCase = None
UpperCAmelCase = field(default="""TranslationVariableLanguages""" , init=lowerCamelCase__ , repr=lowerCamelCase__ )
def _UpperCamelCase ( self : Any ) -> List[str]:
UpperCAmelCase = sorted(set(self.languages ) ) if self.languages else None
UpperCAmelCase = len(self.languages ) if self.languages else None
def __call__( self : List[str] ) -> Any:
return pa.struct({"language": pa.list_(pa.string() ), "translation": pa.list_(pa.string() )} )
    def _UpperCamelCase ( self : List[str] , translation_dict : Optional[int] ) -> Dict:
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({', '.join(lang_set )})." )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}
def _UpperCamelCase ( self : Optional[int] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value("string" ) ),
"translation": Sequence(Value("string" ) ),
}
| 701 |
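Editor's illustrative sketch (not part of the dataset row above): the `TranslationVariableLanguages` feature splits multi-valued translations into (lang, text) pairs and re-sorts them by language code; the same logic in plain Python:

def flatten_translations(translation_dict):
    pairs = []
    for lang, text in translation_dict.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            # One language may carry several alternative translations.
            pairs.extend((lang, el) for el in text)
    languages, translations = zip(*sorted(pairs))
    return {"language": list(languages), "translation": list(translations)}

out = flatten_translations({"fr": "bonjour", "de": ["hallo", "servus"]})
assert out["language"] == ["de", "de", "fr"]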
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")
    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
| 1 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}
MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """Construct a Speech2Text tokenizer (backed by SentencePiece)."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the language-code token of the target language."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])
    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
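# A minimal usage sketch (checkpoint name taken from the constants above;
# downloading the files and the sentencepiece package are required at run time):
#   tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   tok.decode(tok("hello world").input_ids)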
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2) | 337 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    """Masked language modeling prediction pipeline, using a model with a language modeling head."""
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, f'No mask_token ({self.tokenizer.mask_token}) found on the input', )
    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)
    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target, add_special_tokens=False, return_attention_mask=False, return_token_type_ids=False, max_length=1, truncation=True, )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f'The specified target token `{target}` does not exist in the model vocabulary. '
                        "We cannot replace it with anything meaningful, ignoring it")
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f'The specified target token `{target}` does not exist in the model vocabulary. '
                    f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.')
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`.")
        return {}, {}, postprocess_params
    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
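# A minimal usage sketch of the pipeline above (model name is illustrative and
# its weights are downloaded on first use):
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilroberta-base")
#   unmasker("Paris is the <mask> of France.", top_k=2)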
| 664 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    """Configuration class for a generic composite encoder-decoder model."""
    model_type = "encoder-decoder"
    is_composition = True
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
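# A minimal runnable sketch (uses default BERT/GPT-2 configs, so no weights
# are downloaded; the model choices are illustrative):
if __name__ == "__main__":
    from transformers import BertConfig, GPT2Config
    cfg = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), GPT2Config())
    print(cfg.decoder.is_decoder, cfg.decoder.add_cross_attention)  # True True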
| 704 |
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Get raw characters from keyboard input, handling platform differences."""
    if os.name == "nt":
        import msvcrt
        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Get a character from the keyboard, translating escape sequences for arrow keys."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 168 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, solver_order=order, solver_type=solver_type, )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order, solver_type=solver_type, prediction_type=prediction_type, )
                    sample = self.full_loop(
                        solver_order=order, solver_type=solver_type, prediction_type=prediction_type, )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16
    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps | 34 |
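# A minimal sketch of the scheduler-swapping pattern exercised by test_switch
# above: schedulers sharing a config format can be rebuilt from one another.
#   sched = UniPCMultistepScheduler(num_train_timesteps=1000)
#   sched = DPMSolverMultistepScheduler.from_config(sched.config)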
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """Set a quantized tensor on the right device, recursing through submodules if needed."""
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f'{module} has no attribute {split}.')
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.')
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)
    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.')
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)
    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2")
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.")
            else:
                new_value = torch.tensor(value, device="cpu")
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)
            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    """Recursively replace `nn.Linear`/`Conv1D` modules with bitsandbytes quantized linears."""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced, )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config)
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug.")
    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead", FutureWarning, )
    return replace_with_bnb_linear(*args, **kwargs)
def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead", FutureWarning, )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """Return module names (e.g. tied weights and the output head) that should stay unquantized."""
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names | 34 | 1 |
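# A minimal sketch of how the helpers above combine (requires bitsandbytes and
# CUDA; `model` is any PyTorch transformer and `bnb_config` a BitsAndBytesConfig
# instance, both not shown):
#   keep_unquantized = get_keys_to_not_convert(model)
#   model = replace_with_bnb_linear(
#       model, modules_to_not_convert=keep_unquantized, quantization_config=bnb_config
#   )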
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """Utility class containing a conversation and its history."""
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text
    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )
    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text
    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None
    def append_response(self, response: str):
        self.generated_responses.append(response)
    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input
    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    """Multi-turn conversational pipeline."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
    def preprocess(self, conversation: Conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation
    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
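# A minimal usage sketch (model name is illustrative and downloaded on first
# use):
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#   conversation = Conversation("Hi, how are you?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])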
| 717 |
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Return the denominator `d` in `[numerator, digit]` for which long division of
    `numerator / d` produces the longest sequence of distinct remainders before
    one repeats, i.e. the longest recurring cycle in the decimal expansion.
    """
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
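    # Quick sanity check: among denominators up to 10, 1/7 = 0.(142857) has the
    # longest cycle of long-division remainders, so the search returns 7.
    print(solution(1, 10))  # 7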
| 13 | 0 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)
            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len) | 287 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """Wraps a Chinese-CLIP image processor and a BERT tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class | 287 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"} | 718 |
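# A minimal usage sketch of the template above (column name is illustrative):
#   task = LanguageModeling(text_column="content")
#   task.column_mapping  # -> {"content": "text"}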
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=9_9 , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=1_6 , snake_case_=2 , snake_case_=0.02 , snake_case_=4 , ):
"""simple docstring"""
A_ : Union[str, Any] = parent
A_ : int = batch_size
A_ : int = seq_length
A_ : Union[str, Any] = is_training
A_ : Union[str, Any] = use_attention_mask
A_ : Tuple = use_token_type_ids
A_ : List[Any] = use_labels
A_ : Optional[Any] = vocab_size
A_ : List[Any] = hidden_size
A_ : Optional[Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : Optional[Any] = intermediate_size
A_ : int = hidden_act
A_ : List[str] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Union[str, Any] = max_position_embeddings
A_ : str = type_vocab_size
A_ : List[str] = type_sequence_label_size
A_ : Optional[int] = initializer_range
A_ : Optional[Any] = num_choices
def lowerCamelCase_ ( self ):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True , )
        return config, input_ids, attention_mask
def lowerCamelCase_ ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxDistilBertModelTester(self )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('distilbert-base-uncased' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_no_head( self ):
        """simple docstring"""
        model = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
        input_ids = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 1_1, 7_6_8)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) ) | 302 | 0 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A ( OnnxPipelineTesterMixin , unittest.TestCase ):
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
    def get_dummy_inputs( self , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
    def test_pipeline_default_ddpm( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1
    def test_pipeline_pndm( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_lms( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler_ancestral( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_dpm_multistep( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class A ( unittest.TestCase ):
@property
    def gpu_provider( self ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self ):
        '''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def test_inference_k_lms( self ):
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 54 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowerCamelCase ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , _SCREAMING_SNAKE_CASE = 768 , )->Union[str, Any]:
'''simple docstring'''
super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , _SCREAMING_SNAKE_CASE ) )
        self.std = nn.Parameter(torch.ones(1 , _SCREAMING_SNAKE_CASE ) )
    def to( self , torch_device = None , torch_dtype = None , ):
        '''simple docstring'''
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
return self
    def scale( self , _SCREAMING_SNAKE_CASE ):
        '''simple docstring'''
        embeds = (_SCREAMING_SNAKE_CASE - self.mean) * 1.0 / self.std
return embeds
    def unscale( self , _SCREAMING_SNAKE_CASE ):
        '''simple docstring'''
        embeds = (_SCREAMING_SNAKE_CASE * self.std) + self.mean
return embeds
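# Usage sketch (the class name above is a dataset placeholder): instantiate the module
# with the embedding width, call scale() to whiten CLIP image embeddings with the
# learned mean/std, and unscale() to invert the normalization.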
| 590 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
SCREAMING_SNAKE_CASE_ = False
class a ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load( self ):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = generator.manual_seed(0 )
        new_image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
    def test_versatile_diffusion_text2img( self ):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 579 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
SCREAMING_SNAKE_CASE_ = False
class a ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load( self ):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = generator.manual_seed(0 )
        new_image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
    def test_versatile_diffusion_text2img( self ):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 579 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : str = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class MraConfig ( PretrainedConfig ):
    model_type = """mra"""
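    # block_per_row and approx_mode control MRA's block-sparse attention approximation;
    # the remaining hyperparameters mirror a standard BERT-style encoder config.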
    def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1e-5 , position_embedding_type="absolute" , block_per_row=4 , approx_mode="full" , initial_prior_first_n_blocks=0 , initial_prior_diagonal_n_blocks=0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 3 |
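# Heap's algorithm: generates all n! permutations of a list in place; each successive
# permutation differs from the previous one by a single swap of two elements.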
def heaps( arr: list ) -> list:
    '''simple docstring'''
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate(k: int , arr: list ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , arr )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                arr[k - 1] , arr[i] = arr[i] , arr[k - 1]
            else:  # k is odd
                arr[k - 1] , arr[0] = arr[0] , arr[k - 1]
            generate(k - 1 , arr )
    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 514 | 0 |
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
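# Three implementations of the same problem: for each element of `arr`, find the next
# strictly greater element to its right (-1 if none). `expect` above is the reference answer.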
def next_greatest_element_slow(arr ):
    """simple docstring"""
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element: float = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result
def next_greatest_element_fast(arr ):
    """simple docstring"""
    result = []
    for i, outer in enumerate(arr ):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element )
    return result
def next_greatest_element(arr ):
    """simple docstring"""
    arr_size = len(arr )
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
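# next_greatest_element_slow and next_greatest_element_fast are O(n^2) in the worst case;
# the stack-based next_greatest_element is O(n), since each value is pushed and popped at most once.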
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        'from __main__ import arr, next_greatest_element_slow, '
        'next_greatest_element_fast, next_greatest_element'
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 713 |
'''simple docstring'''
def odd_even_sort( input_list ):
    """simple docstring"""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i] , input_list[i + 1] = input_list[i + 1] , input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i] , input_list[i + 1] = input_list[i + 1] , input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
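# Odd-even (brick) sort: a bubble-sort variant that alternates passes over even and odd
# index pairs; worst case O(n^2), but each pass can be parallelised element-wise.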
if __name__ == "__main__":
print('Enter list to be sorted')
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print('The sorted list is')
    print(sorted_list)
| 640 | 0 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = 'mvp'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
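    # Aside from the prompt-tuning fields (use_prompt, prompt_length, prompt_mid_dim),
    # this mirrors the standard BART-style encoder-decoder configuration.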
    def __init__( self , vocab_size=50267 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
                'The config can simply be saved and uploaded again to be fixed.' )
| 597 |
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(F"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rF"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
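# relevant_modified_files now contains only the changed .py files under the requested
# top-level directories, e.g. `python utils/get_modified_files.py utils src tests examples`.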
print(" ".join(relevant_modified_files), end="")
| 597 | 1 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact( artifact_path , targets ):
    """simple docstring"""
    selected_warnings = set()
    buffer = []
    def parse_line(fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = '\n'.join(buffer )
# Only keep the warnings specified in `targets`
if any(F""": {x}: """ in warning for x in targets ):
                        selected_warnings.add(warning )
buffer.clear()
continue
else:
                line = line.strip()
                buffer.append(line )
if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
else:
try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
except Exception:
logger.warning(
F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def extract_warnings( output_dir , targets ):
    """simple docstring"""
    selected_warnings = set()
    paths = [os.path.join(output_dir , p ) for p in os.listdir(output_dir ) if (p.endswith('.zip' ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
return selected_warnings
if __name__ == "__main__":
    def list_str( values ):
"""simple docstring"""
return values.split(',' )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 153 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( OnnxPipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
    def get_dummy_inputs( self , seed=0 ):
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
    def test_pipeline_default_ddpm( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1
    def test_pipeline_pndm( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_lms( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler_ancestral( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_dpm_multistep( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
    def gpu_provider( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A fantasy landscape, trending on artstation'
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='np' , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def test_inference_k_lms( self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((768, 512) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A fantasy landscape, trending on artstation'
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type='np' , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 153 | 1 |
'''simple docstring'''
def logical_left_shift( number: int , shift_amount: int ):
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift( number: int , shift_amount: int ):
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift( number: int , shift_amount: int ):
    '''simple docstring'''
    if number >= 0:  # Get binary representation of positive number
        binary_number = '0' + str(bin(number ) ).strip("""-""" )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
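# Examples (matching the implementations above):
#     logical_left_shift(1, 1)       -> '0b10'     (1 << 1 == 2)
#     logical_right_shift(8, 1)      -> '0b100'    (8 >> 1 == 4)
#     arithmetic_right_shift(-8, 1)  -> '0b11100'  (-8 >> 1 == -4 in 5-bit two's complement)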
if __name__ == "__main__":
import doctest
doctest.testmod() | 334 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs( self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        '''simple docstring'''
        model = TFConvBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        '''simple docstring'''
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1) , (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1) , (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1) , (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        '''simple docstring'''
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common( self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self):
        '''simple docstring'''
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37)
    def test_config( self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended( self):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config , 'use_cache'):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True)
                saved_model_dir = os.path.join(tmpdirname , 'saved_model' , '1')
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs['encoder_hidden_states']
                    output_attentions = outputs['encoder_attentions']
                else:
                    output_hidden_states = outputs['hidden_states']
                    output_attentions = outputs['attentions']
                self.assertEqual(len(outputs) , num_out)
                expected_num_layers = getattr(
                    self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
                self.assertEqual(len(output_hidden_states) , expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(output_attentions) , self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
    def test_model_from_pretrained( self):
        '''simple docstring'''
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        self.assertIsNotNone(model)
    def test_attention_outputs( self):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester , 'key_length' , decoder_seq_length)
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length)
        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2 , 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states , False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict , model_class))
                self.assertEqual(config.output_hidden_states , False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            self.assertEqual(config.output_hidden_states , False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs))
            self.assertEqual(model.config.output_hidden_states , True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest (unittest.TestCase ):
@slow
    def test_inference_no_head( self):
        '''simple docstring'''
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4) | 8 | 0 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__snake_case : str = """true"""
def get_basic_setup( accelerator , num_samples=82 , batch_size=16 ):
    """simple docstring"""
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
return model, ddp_model, dataloader
def get_dataloader( accelerator: Accelerator , use_longest=False ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
    dataset = load_dataset('glue' , 'mrpc' , split='validation' )
    def tokenize_function(examples ):
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
        return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
    return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
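# Note: padding to a fixed max_length keeps batch shapes identical across processes, which
# makes the distributed metric checks below deterministic; padding='longest' exercises the
# variable-shape gathering path instead.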
def get_mrpc_setup( dispatch_batches , split_batches ):
    """simple docstring"""
    accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
    dataloader = get_dataloader(accelerator , not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased' , return_dict=True )
    ddp_model , ddp_dataloader = accelerator.prepare(model , dataloader )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions( model , dataloader , accelerator ):
    """simple docstring"""
    logits_and_targets = []
    for batch in dataloader:
        input , target = batch.values()
        with torch.no_grad():
            logit = model(input )
        logit , target = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    logits , targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits , targs = torch.cat(logits ), torch.cat(targs )
return logits, targs
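# accelerator.gather_for_metrics() gathers tensors from all processes and drops the
# duplicated samples padded onto the last batch, so the concatenated logits line up
# exactly with the dataset length.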
def test_torch_metrics( accelerator: Accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ):
    """simple docstring"""
    model , ddp_model , dataloader = get_basic_setup(accelerator , num_samples , batch_size )
    logits , targs = generate_predictions(ddp_model , dataloader , accelerator )
    assert (
        len(logits ) == num_samples
    ), F"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}"

def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"

def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()

def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
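
# --- Illustrative sketch (not part of the original test script) ---
# `gather_for_metrics` exists for the situation these tests exercise: when the
# dataset does not split evenly across processes, the sampler pads with
# duplicated samples so every process sees full batches, and a naive gather
# would count the duplicates twice. A minimal single-process analogue of the
# truncation step (hypothetical helper, for illustration only):
def _gather_for_metrics_sketch(per_process_chunks: list, true_length: int) -> list:
    """Concatenate per-process outputs, then drop padded duplicates at the end."""
    gathered = [item for chunk in per_process_chunks for item in chunk]
    return gathered[:true_length]


if __name__ == "__main__":
    # 2 "processes", 5 real samples, each padded up to 3 samples.
    assert _gather_for_metrics_sketch([[0, 1, 2], [3, 4, 0]], true_length=5) == [0, 1, 2, 3, 4]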
| 365 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case : Tuple = {"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = ["""GLPNFeatureExtractor"""]
__snake_case : Optional[Any] = ["""GLPNImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
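
# --- Illustrative sketch (not part of the original file) ---
# The pattern above defers heavy submodule imports until an attribute is first
# accessed. A stripped-down, stdlib-only analogue of such a lazy module is
# sketched below; `_LazyNamespace` is a hypothetical name for illustration, not
# the real `_LazyModule` implementation.
import importlib
import types


class _LazyNamespace(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each public attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        # Import the owning submodule only now, then cache the resolved value.
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)
        return value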
| 365 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Segment Tree constructor; works with any commutative combiner `fnc`."""
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Update an element in O(log N) time."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Get range query value in O(log N) time."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
__SCREAMING_SNAKE_CASE = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
__SCREAMING_SNAKE_CASE = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
__SCREAMING_SNAKE_CASE = SegmentTree(test_array, min)
__SCREAMING_SNAKE_CASE = SegmentTree(test_array, max)
__SCREAMING_SNAKE_CASE = SegmentTree(test_array, lambda a, b: a + b)
def A_ ( ):
for i in range(len(__lowercase ) ):
for j in range(__lowercase , len(__lowercase ) ):
UpperCamelCase_ : Optional[Any] =reduce(__lowercase , test_array[i : j + 1] )
UpperCamelCase_ : Tuple =reduce(__lowercase , test_array[i : j + 1] )
UpperCamelCase_ : str =reduce(lambda __lowercase , __lowercase : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(__lowercase , __lowercase )
assert max_range == max_segment_tree.query(__lowercase , __lowercase )
assert sum_range == sum_segment_tree.query(__lowercase , __lowercase )
test_all_segments()
for index, value in test_updates.items():
__SCREAMING_SNAKE_CASE = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
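
    # --- Illustrative note (not part of the original file) ---
    # The tree lives in a flat array: the leaves for arr = [5, 8, 6, 3] sit at
    # st[4..7] and internal node p combines its children 2p and 2p + 1, so with
    # fn=min the backing array becomes [None, 3, 5, 3, 5, 8, 6, 3]:
    demo = SegmentTree([5, 8, 6, 3], min)
    assert demo.st[1:] == [3, 5, 3, 5, 8, 6, 3]
    assert demo.query(1, 2) == 6  # min of [8, 6]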
| 357 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sorts sequence[start..end] (both inclusive) in place.

    >>> seq = [5, 2, 4, 1, 3]; slowsort(seq); seq
    [1, 2, 3, 4, 5]
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
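
# --- Illustrative usage (not part of the original file) ---
# Slowsort recursively sorts both halves just to bubble the maximum of the
# range into position `end`, then "surrenders" by re-sorting everything except
# that maximum — a deliberately pessimal "multiply and surrender" design.
if __name__ == "__main__":
    data = [7, 1, 4, 1, 5, 9, 2, 6]
    slowsort(data)
    assert data == [1, 1, 2, 4, 5, 6, 7, 9]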
| 478 | 0 |
def average_absolute_deviation(nums: list) -> float:
    """
    Return the average absolute deviation of the given numbers.

    >>> average_absolute_deviation([0])
    0.0
    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d

def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
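
# --- Illustrative check (not part of the original file) ---
# `squared_euclidean_distance` evaluates the identity |a - b|^2 = |a|^2 - 2ab + |b|^2
# with broadcasting instead of materialising all pairwise differences:
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    pixels, palette = rng.random((5, 3)), rng.random((4, 3))
    naive = ((pixels[:, None, :] - palette[None, :, :]) ** 2).sum(-1)
    assert np.allclose(squared_euclidean_distance(pixels, palette), naive)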

class ImageGPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs an ImageGPT image processor: images are optionally resized,
    normalized to [-1, 1], and color-quantized into cluster indices.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Normalize an image's pixel values from [0, 255] to [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess an image or batch of images."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 144 | 0 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """
    Returns the numerator of the fraction immediately to the left of
    numerator/denominator among all fractions with denominators up to `limit`.
    """
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1

        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator

    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
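
    # --- Illustrative check (not part of the original file) ---
    # For each denominator d the best candidate numerator is floor(d * 3 / 7),
    # nudged down by one when d is a multiple of 7 so the fraction stays strictly
    # below 3/7; candidates are compared by cross-multiplication to avoid floats.
    # With denominators up to 8 the closest fraction below 3/7 is 2/5, hence:
    assert solution(3, 7, 8) == 2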
| 592 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 592 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 28 |
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    """
    Count the number of set bits in a non-negative integer.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
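
    # --- Illustrative note (not part of the original file) ---
    # `number &= number - 1` clears the lowest set bit: subtracting 1 flips
    # that bit to 0 and every bit below it to 1, so the AND removes exactly
    # one set bit per iteration (Brian Kernighan's trick), e.g.:
    assert 0b1100 & 0b1011 == 0b1000
    assert get_set_bits_count(0b1100) == 2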
| 28 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)

def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )

def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Creates a mapping function from each choice's string representation to the actual value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)

def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)

class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
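
# --- Illustrative usage sketch (not part of the original module) ---
# Each dataclass field becomes a CLI flag; `TrainingConfig` below is a
# hypothetical example, not an API of this module.
if __name__ == "__main__":

    @dataclasses.dataclass
    class TrainingConfig:
        learning_rate: float = 3e-4
        epochs: int = 1
        use_fp16: bool = False

    demo_parser = HfArgumentParser(TrainingConfig)
    (config,) = demo_parser.parse_args_into_dataclasses(
        args=["--learning_rate", "1e-3", "--use_fp16", "true"]
    )
    assert (config.learning_rate, config.epochs, config.use_fp16) == (1e-3, 1, True)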
| 53 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()

def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
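
# --- Illustrative usage note (not part of the original script) ---
# From a shell, everything after the script path is forwarded to the training
# script itself; the file names below are hypothetical examples:
#
#   python xla_spawn.py --num_cores 8 my_trainer.py --learning_rate 3e-4
#
# which imports `my_trainer`, rewrites sys.argv (appending --tpu_num_cores 8),
# and spawns `my_trainer._mp_fn` once per TPU core.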
| 53 | 1 |
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """
    Returns, for the given minimum block length, the least row length n whose
    fill-count function first exceeds one million.
    """
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 718 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 416 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 73 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}

def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )

def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """
    Perform a runtime check of the dependency versions, using the exact same syntax used by pip.
    """
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)

def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
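
# --- Illustrative usage sketch (not part of the original module) ---
# Requirements use pip's comparison syntax; comma-separated constraints must
# all hold. Checking the running interpreter never touches importlib.metadata:
if __name__ == "__main__":
    require_version("python>=3.7,<4.0")  # passes on any supported interpreter
    try:
        require_version("python<3.0")
    except ImportError as err:
        print(f"version check failed as expected: {err}")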
| 469 | 0 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}

def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab

class XLMProphetNetTokenizer(PreTrainedTokenizer):
    """
    SentencePiece-based tokenizer for XLM-ProphetNet, adapted from the
    XLM-RoBERTa tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}

        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> str:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
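
# --- Illustrative note (not part of the original file) ---
# The id arithmetic above reserves embedding positions 0-14 for the 5 special
# tokens plus 10 "[unused]" slots, while SentencePiece's 3 control pieces are
# remapped into that reserved region. That is why real spm ids are shifted by
# `fairseq_offset = 12`: the first real piece "," (spm id 3) lands at
# embedding position 3 + 12 = 15, and ids 0-4 resolve through
# `fairseq_tokens_to_ids` instead of the spm model.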
| 260 |
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    """
    Return the shortest distance and path between `source` and `destination`
    in a binary grid (1 = passable, 0 = blocked), with unit edge costs.
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue

        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
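
    # --- Illustrative usage (not part of the original file) ---
    # A 3x3 grid of passable cells; with diagonal moves allowed, the
    # corner-to-corner path needs only two unit-cost steps:
    demo_grid = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
    dist, path = dijkstra(demo_grid, (0, 0), (2, 2), allow_diagonal=True)
    assert dist == 2.0 and path[0] == (0, 0) and path[-1] == (2, 2)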
| 260 | 1 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """
    Return the probability of `successes` successes out of `trials` trials,
    with `prob` probability of a single success.
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trails')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
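
    # --- Illustrative check (not part of the original file) ---
    # By hand: C(4, 2) = 6 ways, each with probability 0.75^2 * 0.25^2
    # = 0.5625 * 0.0625 = 0.03515625, so the total is 6 * 0.03515625 = 0.2109375.
    assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12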
| 215 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = """    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
"""


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers( self ) -> List[str]:
        '''simple docstring'''
        code = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
        self.assertEqual(code , REFERENCE_CODE )
    def test_is_copy_consistent( self ) -> Union[str, Any]:
        '''simple docstring'''
        # Base copy consistency
        self.check_copy_consistency(
            """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
        # With no empty line at the end
        self.check_copy_consistency(
            """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , f"{long_class_name}LMPredictionHead" , re.sub("""Bert""" , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , REFERENCE_CODE , overwrite_result=re.sub("""Bert""" , """TestModel""" , REFERENCE_CODE ) , )
    def test_convert_to_localized_md( self ) -> Union[str, Any]:
        '''simple docstring'''
        localized_readme = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , localized_md_list , localized_readme["""format_model_list"""] )
        self.assertFalse(num_models_equal )
        self.assertEqual(converted_md_list , converted_md_list_sample )
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , converted_md_list_sample , localized_readme["""format_model_list"""] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal )
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        _ , converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list , link_unchanged_md_list , localized_readme["""format_model_list"""] )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list , converted_md_list_sample )
| 215 | 1 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer( BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer( BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
DPRReaderOutput = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                f"""There should be as many titles as texts but got {len(titles )} titles and {len(texts )} texts.""" )
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )["input_ids"]
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ):
        '''simple docstring'''
        input_ids = reader_input["input_ids"]
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_docs = len(relevance_logits )
        sorted_docs = sorted(range(n_docs ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ):
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x: x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
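# Illustrative usage sketch for the reader tokenizer defined below (not part of
# the original file); the hub checkpoint name and the DPRReader model class come
# from transformers / the Hugging Face hub and are assumptions here:
#
#   from transformers import DPRReader, DPRReaderTokenizer
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="What is love?",
#       titles="Haddaway",
#       texts="'What Is Love' is a song recorded by Haddaway.",
#       return_tensors="pt",
#   )
#   outputs = model(**encoded)
#   best_spans = tokenizer.decode_best_spans(encoded, outputs)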
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizer( CustomDPRReaderTokenizerMixin , BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
| 261 |
"""simple docstring"""
def __a ( A = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
A__ = set()
# Replace all the whitespace in our sentence
A__ = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(A ) == 26
def __a ( A = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
A__ = [False] * 26
for char in input_str:
if char.islower():
A__ = True
elif char.isupper():
A__ = True
return all(A )
def __a ( A = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
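# Note (illustrative, not from the original file): in the timings recorded in
# benchmark() below, the set-comprehension variant wins because it scans the
# string once and pushes the per-character work down to C-level str/set
# machinery; the exact numbers are machine-dependent.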
def benchmark() -> None:
    '''simple docstring'''
    from timeit import timeit
    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()" , setup=setup ) )
    print(timeit("is_pangram_faster()" , setup=setup ) )
    print(timeit("is_pangram_fastest()" , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
    benchmark()
| 261 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput( BaseOutput ):
    """simple docstring"""
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL( ModelMixin , ConfigMixin ):
    """simple docstring"""
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 4 , norm_num_groups = 32 , sample_size = 32 , scaling_factor = 0.18215 , ) -> List[Any]:
        '''simple docstring'''
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=True , )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , norm_num_groups=norm_num_groups , act_fn=act_fn , )
        self.quant_conv = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        self.post_quant_conv = nn.Conv2d(latent_channels , latent_channels , 1 )
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple) )
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing( self , module , value=False ):
        '''simple docstring'''
        if isinstance(module , (Encoder, Decoder) ):
            module.gradient_checkpointing = value

    def enable_tiling( self , use_tiling = True ):
        '''simple docstring'''
        self.use_tiling = use_tiling

    def disable_tiling( self ):
        '''simple docstring'''
        self.enable_tiling(False )

    def enable_slicing( self ):
        '''simple docstring'''
        self.use_slicing = True

    def disable_slicing( self ):
        '''simple docstring'''
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors( self ) -> Dict[str, AttentionProcessor]:
        '''simple docstring'''
        processors = {}

        def fn_recursive_add_processors(name , module , processors ):
            if hasattr(module , """set_processor""" ):
                processors[f'{name}.processor'] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f'{name}.{sub_name}' , child , processors )
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors

    def set_attn_processor( self , processor ) -> Dict:
        '''simple docstring'''
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                f'A dict of processors was passed, but the number of processors {len(processor )} does not match the'
                f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )

        def fn_recursive_attn_processor(name , module , processor ):
            if hasattr(module , """set_processor""" ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(f'{name}.processor' ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f'{name}.{sub_name}' , child , processor )

        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )

    def set_default_attn_processor( self ) -> Tuple:
        '''simple docstring'''
        self.set_attn_processor(AttnProcessor() )
    @apply_forward_hook
    def encode( self , x , return_dict = True ) -> AutoencoderKLOutput:
        '''simple docstring'''
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x , return_dict=return_dict )
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice ) for x_slice in x.split(1 )]
            h = torch.cat(encoded_slices )
        else:
            h = self.encoder(x )
        moments = self.quant_conv(h )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )

    def _decode( self , z , return_dict = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        '''simple docstring'''
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z , return_dict=return_dict )
        z = self.post_quant_conv(z )
        dec = self.decoder(z )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )

    @apply_forward_hook
    def decode( self , z , return_dict = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        '''simple docstring'''
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
            decoded = torch.cat(decoded_slices )
        else:
            decoded = self._decode(z ).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded )
    def blend_v( self , a , b , blend_extent ):
        '''simple docstring'''
        blend_extent = min(a.shape[2] , b.shape[2] , blend_extent )
        for y in range(blend_extent ):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h( self , a , b , blend_extent ):
        '''simple docstring'''
        blend_extent = min(a.shape[3] , b.shape[3] , blend_extent )
        for x in range(blend_extent ):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode( self , x , return_dict = True ) -> AutoencoderKLOutput:
        '''simple docstring'''
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor )
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0 , x.shape[2] , overlap_size ):
            row = []
            for j in range(0 , x.shape[3] , overlap_size ):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile )
                tile = self.quant_conv(tile )
                row.append(tile )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        moments = torch.cat(result_rows , dim=2 )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )

    def tiled_decode( self , z , return_dict = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        '''simple docstring'''
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor )
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0 , z.shape[2] , overlap_size ):
            row = []
            for j in range(0 , z.shape[3] , overlap_size ):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile )
                decoded = self.decoder(tile )
                row.append(decoded )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        dec = torch.cat(result_rows , dim=2 )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample , sample_posterior = False , return_dict = True , generator = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
        '''simple docstring'''
        x = sample
        posterior = self.encode(x ).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator )
        else:
            z = posterior.mode()
        dec = self.decode(z ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
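# Illustrative usage sketch (not part of the original file); the weights repo
# name is an assumption:
#
#   from diffusers import AutoencoderKL
#   vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
#   vae.enable_tiling()   # tiled encode/decode for large images, blended at seams
#   vae.enable_slicing()  # process batch elements one at a time to save memory
#   posterior = vae.encode(images).latent_dist
#   latents = posterior.sample() * vae.config.scaling_factor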
| 457 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments( TrainingArguments ):
    """simple docstring"""
    label_smoothing: Optional[float] = field(
        default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
    sortish_sampler: bool = field(default=False , metadata={"help": "Whether to use SortishSampler or not."} )
    predict_with_generate: bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    adafactor: bool = field(default=False , metadata={"help": "Whether to use adafactor."} )
    encoder_layerdrop: Optional[float] = field(
        default=None , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
    decoder_layerdrop: Optional[float] = field(
        default=None , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
    dropout: Optional[float] = field(default=None , metadata={"help": "Dropout probability. Goes into model.config."} )
    attention_dropout: Optional[float] = field(
        default=None , metadata={"help": "Attention dropout probability. Goes into model.config."} )
    lr_scheduler: Optional[str] = field(
        default="linear" , metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
| 457 | 1 |
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , *, clip_extra_context_tokens : int = 4 , clip_embeddings_dim : int = 768 , time_embed_dim : int , cross_attention_dim , ) -> Any:
        '''simple docstring'''
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim )
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim )
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim )
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim )

    def forward( self , *, image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ) -> Optional[Any]:
        '''simple docstring'''
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size , -1 )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds )
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings )
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1 )
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states )
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
        return text_encoder_hidden_states, additive_clip_time_embeddings
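# Shape walkthrough (illustrative note, not from the original file), with batch
# size B, clip_embeddings_dim D, cross_attention_dim C and clip_extra_context_tokens K:
#   image_embeddings (B, D) -> clip_extra_context_tokens_proj -> (B, K * C)
#   -> reshape -> (B, C, K) -> permute -> (B, K, C), which is then concatenated
#   in front of the projected text hidden states (B, seq_len, C) to give the
#   (B, K + seq_len, C) conditioning sequence returned above.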
| 36 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()
    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError("""args.model_type should be \"bert\".""")
    state_dict = model.state_dict()
    compressed_sd = {}
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"""distilbert.embeddings.{w}.weight"""] = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
    for w in ["weight", "bias"]:
        compressed_sd[f"""distilbert.embeddings.LayerNorm.{w}"""] = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
            ]
        std_idx += 1
    compressed_sd["""vocab_projector.weight"""] = state_dict["""cls.predictions.decoder.weight"""]
    compressed_sd["""vocab_projector.bias"""] = state_dict["""cls.predictions.bias"""]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"""vocab_transform.{w}"""] = state_dict[f"""cls.predictions.transform.dense.{w}"""]
            compressed_sd[f"""vocab_layer_norm.{w}"""] = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
    print(f"""N layers selected for distillation: {std_idx}""")
    print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
    print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
    torch.save(compressed_sd, args.dump_checkpoint)
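    # Illustrative follow-up (not part of the original script): the dumped state
    # dict is shaped to initialize a 6-layer DistilBERT-style student, e.g.
    #
    #   from transformers import DistilBertConfig, DistilBertForMaskedLM
    #   student = DistilBertForMaskedLM(DistilBertConfig())
    #   student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)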
| 36 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["""PoolFormerFeatureExtractor"""]
    _import_structure["image_processing_poolformer"] = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
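    # How this works (illustrative note, not from the original file): at import
    # time only the _import_structure dict above is built; _LazyModule replaces
    # this module in sys.modules and resolves names such as PoolFormerModel on
    # first attribute access, so the heavy torch/vision imports are deferred
    # until actually needed.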
| 58 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = '''scheduler_config.json'''
class FlaxKarrasDiffusionSchedulers( Enum ):
    """simple docstring"""
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput( BaseOutput ):
    """simple docstring"""
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """simple docstring"""
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["""dtype"""]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path = None , subfolder = None , return_unused_kwargs=False , **kwargs , ) -> Any:
        '''simple docstring'''
        config , kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , **kwargs , )
        scheduler , unused_kwargs = cls.from_config(config , return_unused_kwargs=True , **kwargs )
        if hasattr(scheduler , """create_state""" ) and getattr(scheduler , """has_state""" , False ):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state

    def save_pretrained( self , save_directory , push_to_hub = False , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )

    @property
    def compatibles( self ) -> Tuple:
        '''simple docstring'''
        return self._get_compatibles()

    @classmethod
    def _get_compatibles( cls ) -> Dict:
        '''simple docstring'''
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split(""".""" )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
        return compatible_classes
def broadcast_to_shape_from_left( x: jnp.ndarray , shape: Tuple[int] ):
    '''simple docstring'''
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )


def betas_for_alpha_bar( num_diffusion_timesteps: int , max_beta=0.999 , dtype=jnp.float32 ):
    '''simple docstring'''
    def alpha_bar( time_step ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2

    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
@flax.struct.dataclass
class CommonSchedulerState:
    """simple docstring"""
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create( cls , scheduler ) -> int:
        '''simple docstring'''
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas , axis=0 )
        return cls(
            alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def get_sqrt_alpha_prod( state: CommonSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    '''simple docstring'''
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common( state: CommonSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    '''simple docstring'''
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common( state: CommonSchedulerState , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    '''simple docstring'''
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
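# Worked example (illustrative, not from the original file): if for some
# timestep t the cumulative product alphas_cumprod[t] equals 0.25, then
# add_noise_common returns 0.5 * original_samples + sqrt(0.75) * noise, i.e. a
# sample from the standard forward process
# q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) * I)
# drawn via reparameterization.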
| 58 | 1 |
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a , b = 0, 1
    while True:
        a , b = b, a + b
        yield b


def solution( n: int = 1_0_0_0 ) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen ) ) ) < n:
        answer += 1
    return answer + 1
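# Sanity check (illustrative, not from the original file): the first Fibonacci
# number with 3 digits is F(12) = 144, so solution(3) == 12; Project Euler
# problem 25 asks for solution(1000).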
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 697 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__( self , parent , batch_size=2 , image_size=32 , patch_size=16 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , backbone_out_indices=[0, 1, 2, 3] , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , backbone_featmap_shape=[1, 384, 24, 24] , is_hybrid=True , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        '''simple docstring'''
        backbone_config = {
            """global_padding""": """same""",
            """layer_type""": """bottleneck""",
            """depths""": [3, 4, 9],
            """out_features""": ["""stage1""", """stage2""", """stage3"""],
            """embedding_dynamic_padding""": True,
            """hidden_sizes""": [96, 192, 384, 768],
            """num_groups""": 2,
        }
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_depth_estimation( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )

    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp( self ):
        '''simple docstring'''
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=37 )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""DPT does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass

    def test_model_common_attributes( self ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature( self ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_depth_estimation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )

    def test_for_semantic_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training( self ):
        '''simple docstring'''
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()

    def test_training_gradient_checkpointing( self ):
        '''simple docstring'''
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_initialization( self ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"""{name}.{key}""" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small( self ):
        '''simple docstring'''
        pass
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    def test_raise_readout_type( self ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = """add"""
        with self.assertRaises(ValueError ):
            _ = DPTForDepthEstimation(config )


def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest( unittest.TestCase ):
    def test_inference_depth_estimation( self ):
        '''simple docstring'''
        image_processor = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
        model = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , expected_slice , atol=1E-4 ) )
| 697 | 1 |
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
UpperCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
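A hypothetical programmatic invocation of the converter, assuming the functions above are importable; all paths below are placeholders:

# convert_t5x_checkpoint_to_pytorch(
#     t5x_checkpoint_path="/tmp/t5x/checkpoint_1000000",  # placeholder path
#     config_file="/tmp/t5/config.json",                  # placeholder path
#     pytorch_dump_path="/tmp/t5_pytorch",                # placeholder path
#     is_encoder_only=False,
# )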
| 535 |
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert from one volume unit to another, using cubic metres as the intermediate unit."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
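Two example conversions, derived directly from the constants in METRIC_CONVERSION above:

#   volume_conversion(4, "cubicmeter", "litre")  -> 4000.0   (4 * 1 * 1000)
#   volume_conversion(1, "litre", "gallon")      -> 0.264172 (1 * 0.001 * 264.172)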
if __name__ == "__main__":
import doctest
doctest.testmod()
| 535 | 1 |
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Returns the sum of the digits of the number 2 ** power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
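A quick worked check of the digit-sum loop above:

# solution(15) == 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
# Project Euler problem 16 asks for solution(1000).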
| 359 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=42 ),
"""v2""": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 359 | 1 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
_lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
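A hypothetical command-line invocation; the script name and all paths are placeholders for illustration:

# python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /tmp/bigbird/model.ckpt \
#     --big_bird_config_file /tmp/bigbird/config.json \
#     --pytorch_dump_path /tmp/bigbird_pytorch \
#     --is_trivia_qa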
| 87 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_byt5': ['ByT5Tokenizer']}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
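The same deferred-import idea can be sketched with a module-level __getattr__ (PEP 562); this toy equivalent is illustrative only and is not the transformers implementation:

import importlib

_lazy_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

def __getattr__(name):
    # Import the submodule only when one of its attributes is first requested.
    for module_name, attrs in _lazy_structure.items():
        if name in attrs:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")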
| 421 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 64 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
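For a standalone sense of the extractor's output, here is a short sketch; the waveform is random and purely illustrative:

# import numpy as np
# from transformers import ASTFeatureExtractor
#
# extractor = ASTFeatureExtractor()
# waveform = np.random.rand(16000).astype(np.float32)  # one second of audio at 16 kHz
# features = extractor(waveform, sampling_rate=16000, return_tensors="np")
# print(features.input_values.shape)  # (1, 1024, 128), matching the integration test above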
| 64 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
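Two usage examples, with the agent name as a placeholder; a string containing any whitespace is returned verbatim, while anything else is treated as a dataset repo id on the Hub:

# prompt = download_prompt(None, agent_name="MyAgent")  # fetches the default "run" template
# inline = download_prompt("Translate <<task>> to French", agent_name="MyAgent")  # returned verbatim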
| 99 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # depth-first search with backtracking: prune when the path already
    # overshoots max_sum or can no longer reach it with the remaining numbers
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result) | 162 | 0 |
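A note on the expected output of the example run above:

# With nums = [3, 34, 4, 12, 5, 2] and max_sum = 9, the solver finds the two
# qualifying subsets in index order: [3, 4, 2] and [4, 5].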
'''simple docstring'''
def counting_sort(collection):
    """Pure implementation of the counting sort algorithm in Python."""
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
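A brief note on cost, plus a worked example of the function above:

# Counting sort runs in O(n + k) time with O(n + k) extra space, where k is the
# value range (coll_max - coll_min + 1); e.g. counting_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5].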
| 715 |
'''simple docstring'''
from manim import *
class Stage2(Scene):
    def construct(self):
_a = Rectangle(height=0.5 , width=0.5 )
_a = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_a = [mem.copy() for i in range(6 )]
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_a = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_a = VGroup(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_a = Text('''CPU''' , font_size=24 )
_a = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0.5 , aligned_edge=lowerCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase_ )
_a = [mem.copy() for i in range(4 )]
_a = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_a = Text('''GPU''' , font_size=24 )
_a = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0.5 , aligned_edge=lowerCAmelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCAmelCase_ )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_a = Text('''Model''' , font_size=24 )
_a = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0.5 , aligned_edge=lowerCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.add(lowerCAmelCase_ )
_a = []
for i, rect in enumerate(lowerCAmelCase_ ):
rect.set_stroke(lowerCAmelCase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_a = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCAmelCase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowerCAmelCase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCAmelCase_ , buff=0.0 )
self.add(lowerCAmelCase_ )
cpu_targs.append(lowerCAmelCase_ )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_a = Text('''Loaded Checkpoint''' , font_size=24 )
_a = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , aligned_edge=lowerCAmelCase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCAmelCase_ , lowerCAmelCase_ )
_a = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowerCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_a = MarkupText(
F'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase_ ) , Write(lowerCAmelCase_ ) )
self.play(Write(lowerCAmelCase_ , run_time=1 ) , Create(lowerCAmelCase_ , run_time=1 ) )
_a = []
_a = []
for i, rect in enumerate(lowerCAmelCase_ ):
_a = fill.copy().set_fill(lowerCAmelCase_ , opacity=0.7 )
target.move_to(lowerCAmelCase_ )
first_animations.append(GrowFromCenter(lowerCAmelCase_ , run_time=1 ) )
_a = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowerCAmelCase_ , run_time=1.5 ) )
self.play(*lowerCAmelCase_ )
self.play(*lowerCAmelCase_ )
self.wait()
| 377 | 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def __snake_case ( self :List[str] ) ->Any:
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def __snake_case ( self :Optional[Any] ) ->Any:
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def __snake_case ( self :Union[str, Any] ) ->Optional[int]:
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def __snake_case ( self :Any ) ->Tuple:
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def __snake_case ( self :List[str] ) ->Tuple:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __snake_case ( self :List[str] ) ->Dict:
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
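A minimal inference sketch using the same convnext checkpoint as above; the image path is a placeholder:

# import torch
# from PIL import Image
# from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#
# processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
# model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
# inputs = processor(images=Image.open("scene.jpg"), return_tensors="pt")  # placeholder image
# with torch.no_grad():
#     logits = model(**inputs).logits  # (1, num_labels, 512, 512)
# segmentation = logits.argmax(dim=1)  # per-pixel class ids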
| 264 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
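Typical usage of the pipeline, as a sketch; the checkpoint is a public DDPM model commonly used in diffusers examples:

# from diffusers import DDIMPipeline
#
# pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
# image = pipe(num_inference_steps=50, eta=0.0).images[0]
# image.save("ddim_sample.png")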
| 264 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist()) | 702 |
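The same chaining pattern outside a test, on toy logits; shapes and parameter values are illustrative:

# import jax.numpy as jnp
# from transformers.generation import (
#     FlaxLogitsProcessorList,
#     FlaxTemperatureLogitsWarper,
#     FlaxTopKLogitsWarper,
# )
#
# processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(5)])
# input_ids = jnp.zeros((1, 4), dtype="i4")
# scores = jnp.ones((1, 50)) / 50  # uniform toy logits
# scores = processors(input_ids, scores, cur_len=4)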
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        sn1 = {}
        sn2 = []
        sn3 = 1
        sn4 = [1, 2]
        sn5 = {"a": 1, "b": 2}
        sn6 = {"a": [1, 2], "b": [3, 4]}
        sn7 = {"a": {"1": 1}, "b": 2}
        sn8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_sn1 = {}
        expected_map_nested_sn2 = []
        expected_map_nested_sn3 = 2
        expected_map_nested_sn4 = [2, 3]
        expected_map_nested_sn5 = {"a": 2, "b": 3}
        expected_map_nested_sn6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_sn7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_sn8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, sn1), expected_map_nested_sn1)
        self.assertEqual(map_nested(add_one, sn2), expected_map_nested_sn2)
        self.assertEqual(map_nested(add_one, sn3), expected_map_nested_sn3)
        self.assertEqual(map_nested(add_one, sn4), expected_map_nested_sn4)
        self.assertEqual(map_nested(add_one, sn5), expected_map_nested_sn5)
        self.assertEqual(map_nested(add_one, sn6), expected_map_nested_sn6)
        self.assertEqual(map_nested(add_one, sn7), expected_map_nested_sn7)
        self.assertEqual(map_nested(add_one, sn8), expected_map_nested_sn8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, sn1, num_proc=num_proc), expected_map_nested_sn1)
        self.assertEqual(map_nested(add_one, sn2, num_proc=num_proc), expected_map_nested_sn2)
        self.assertEqual(map_nested(add_one, sn3, num_proc=num_proc), expected_map_nested_sn3)
        self.assertEqual(map_nested(add_one, sn4, num_proc=num_proc), expected_map_nested_sn4)
        self.assertEqual(map_nested(add_one, sn5, num_proc=num_proc), expected_map_nested_sn5)
        self.assertEqual(map_nested(add_one, sn6, num_proc=num_proc), expected_map_nested_sn6)
        self.assertEqual(map_nested(add_one, sn7, num_proc=num_proc), expected_map_nested_sn7)
        self.assertEqual(map_nested(add_one, sn8, num_proc=num_proc), expected_map_nested_sn8)

        sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn1_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn1, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn1, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''', [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''', [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
], )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    # check multiprocessing (uses pickle)
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4 | 628 | 0 |
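# A standalone sketch (not the datasets implementation) of the recursive
# mapping behaviour that test_map_nested exercises above.
def map_nested_sketch(fn, data):
    # Recurse into dicts, lists and tuples; apply `fn` to every leaf value.
    if isinstance(data, dict):
        return {k: map_nested_sketch(fn, v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        return type(data)(map_nested_sketch(fn, v) for v in data)
    return fn(data)

assert map_nested_sketch(lambda x: x + 1, {"a": [1, 2], "b": {"1": 3}}) == {"a": [2, 3], "b": {"1": 4}}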
def circle_sort(collection: list) -> list:
    """Sort a list in place with the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
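# A quick sanity check against the built-in sort; the random sample below is
# an illustrative addition, not part of the original module.
import random

sample = [random.randint(-100, 100) for _ in range(50)]
assert circle_sort(list(sample)) == sorted(sample)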
| 303 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
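# A minimal standard-library sketch of the guarded-import pattern used above,
# which lets a package degrade gracefully when an optional backend is missing.
# `fancy_backend` is a hypothetical package name, not a real dependency.
import importlib.util

class _MissingBackend:
    # Stand-in object that fails loudly only when the optional feature is used.
    def __getattr__(self, name):
        raise ImportError("install `fancy_backend` to use this feature")

if importlib.util.find_spec("fancy_backend") is not None:
    import fancy_backend as backend
else:
    backend = _MissingBackend()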
| 303 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 531 |
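# A minimal sketch of the mechanism _LazyModule builds on: module-level
# __getattr__ (PEP 562) resolves names on first access, so heavy torch imports
# are deferred. The mapping below is illustrative, not the real transformers
# import structure.
import importlib

_LAZY_ATTRS = {"MegatronBertModel": ".modeling_megatron_bert"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")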
def solution(numerator: int = 3, denominator: int = 7, limit: int = 100_0000) -> int:
    """Find the numerator of the largest fraction left of numerator/denominator
    with a denominator not exceeding `limit`."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000)) | 531 | 1 |
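# A brute-force cross-check of solution() on a tiny limit, enumerating every
# fraction with the standard-library Fraction type; the helper below is an
# illustrative addition for validation only.
from fractions import Fraction

def brute_force(numerator=3, denominator=7, limit=8):
    target = Fraction(numerator, denominator)
    best = Fraction(0, 1)
    for d in range(1, limit + 1):
        for n in range(1, d):
            f = Fraction(n, d)
            if best < f < target:
                best = f
    return best.numerator

assert brute_force(limit=8) == solution(limit=8) == 2  # 2/5 is the best fraction left of 3/7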
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
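# The core trick in make_atom14_masks is a per-residue gather: indexing a
# (num_restypes, 14) lookup table with the aatype vector yields a (num_res, 14)
# map in one step. A toy NumPy sketch of that pattern; the shapes are
# illustrative, not the real 14/37 atom layouts.
import numpy as np

restype_to_padded = np.array([[0, 1, 2, 3],
                              [0, 2, 4, 5],
                              [1, 3, 4, 0]])  # toy table: 3 residue types, 4 slots
aatype = np.array([2, 0, 1])  # one residue type per sequence position
per_residue_map = restype_to_padded[aatype]  # shape (3, 4); row i = slot map for residue i
print(per_residue_map)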
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
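# A minimal sketch of the TYPE_CHECKING pattern used above: the import is
# visible to static type checkers but never executed at runtime, so the heavy
# module stays unloaded. `Decimal` stands in for an expensive optional import.
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from decimal import Decimal

def double(price: "Decimal") -> "Decimal":
    # String annotations keep this valid even though Decimal is never
    # imported at runtime.
    return price * 2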
| 0 | 1 |
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
__magic_name__: Tuple = len(__snake_case ) if isinstance(__snake_case , __snake_case ) else 1
# get prompt text embeddings
__magic_name__: Any = self.tokenizer(
__snake_case , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__magic_name__: str = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__magic_name__: Union[str, Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__magic_name__: Any = text_input_ids[:, : self.tokenizer.model_max_length]
__magic_name__: List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__magic_name__: List[Any] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__snake_case )
# duplicate text embeddings for each generation per prompt
__magic_name__: Optional[int] = prompt_embeds.repeat_interleave(__snake_case , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__magic_name__: Any = self.learned_classifier_free_sampling_embeddings.embeddings
__magic_name__: Tuple = negative_prompt_embeds.unsqueeze(0 ).repeat(__snake_case , 1 , 1 )
else:
__magic_name__: List[str] = [""""""] * batch_size
__magic_name__: Union[str, Any] = text_input_ids.shape[-1]
__magic_name__: int = self.tokenizer(
__snake_case , padding="""max_length""" , max_length=__snake_case , truncation=__snake_case , return_tensors="""pt""" , )
__magic_name__: Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__magic_name__: Tuple = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__snake_case )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__magic_name__: Optional[Any] = negative_prompt_embeds.shape[1]
__magic_name__: Union[str, Any] = negative_prompt_embeds.repeat(1 , __snake_case , 1 )
__magic_name__: Dict = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__magic_name__: Dict = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : Optional[int] , __snake_case : Union[str, List[str]] , __snake_case : int = 1_0_0 , __snake_case : float = 5.0 , __snake_case : float = 1.0 , __snake_case : int = 1 , __snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case : Optional[torch.FloatTensor] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , __snake_case : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __snake_case : int = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(__snake_case , __snake_case ):
__magic_name__: Dict = 1
elif isinstance(__snake_case , __snake_case ):
__magic_name__: Optional[int] = len(__snake_case )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(__snake_case )}' )
__magic_name__: str = batch_size * num_images_per_prompt
__magic_name__: Dict = guidance_scale > 1.0
__magic_name__: Dict = self._encode_prompt(__snake_case , __snake_case , __snake_case )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__snake_case , __snake_case ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(__snake_case )}.' )
# get the initial completely masked latents unless the user supplied it
__magic_name__: Dict = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__magic_name__: str = self.transformer.num_vector_embeds - 1
__magic_name__: Any = torch.full(__snake_case , __snake_case ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
__magic_name__: Union[str, Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__snake_case , device=self.device )
__magic_name__: Optional[Any] = self.scheduler.timesteps.to(self.device )
__magic_name__: Dict = latents
for i, t in enumerate(self.progress_bar(__snake_case ) ):
# expand the sample if we are doing classifier free guidance
__magic_name__: Optional[int] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__magic_name__: List[str] = self.transformer(__snake_case , encoder_hidden_states=__snake_case , timestep=__snake_case ).sample
if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)
__magic_name__: Union[str, Any] = self.truncate(__snake_case , __snake_case )
# remove `log(0)`'s (`-inf`s)
__magic_name__: Dict = model_output.clamp(-7_0 )
# compute the previous noisy sample x_t -> x_t-1
__magic_name__: Dict = self.scheduler.step(__snake_case , timestep=__snake_case , sample=__snake_case , generator=__snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__snake_case , __snake_case , __snake_case )
__magic_name__: List[str] = self.vqvae.config.vq_embed_dim
__magic_name__: Any = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__magic_name__: Union[str, Any] = self.vqvae.quantize.get_codebook_entry(__snake_case , shape=__snake_case )
__magic_name__: Optional[Any] = self.vqvae.decode(__snake_case , force_not_quantize=__snake_case ).sample
__magic_name__: Tuple = (image / 2 + 0.5).clamp(0 , 1 )
__magic_name__: Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__magic_name__: str = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncates `log_p_x_0` such that for each column vector the kept entries cover `truncation_rate` of the
        probability mass. The lowest probabilities that would push the cumulative probability above
        `truncation_rate` are set to zero (i.e. -inf in log space).
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
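# truncate() keeps only the most probable codebook entries until their
# cumulative mass reaches truncation_rate. A small NumPy sketch of the same
# idea for a single distribution, in probability space rather than the batched
# log space used above.
import numpy as np

def truncate_probs(p, truncation_rate=0.8):
    order = np.argsort(p)[::-1]                    # most probable first
    keep = np.cumsum(p[order]) <= truncation_rate
    keep[0] = True                                 # never drop the top entry
    mask = np.zeros_like(p, dtype=bool)
    mask[order[keep]] = True
    return np.where(mask, p, 0.0)

print(truncate_probs(np.array([0.5, 0.3, 0.15, 0.05])))  # [0.5 0.3 0.  0. ]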
| 213 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on


class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source language setting: prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
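# A toy illustration of the fairseq/SentencePiece id alignment described in the
# comment table inside __init__ above: SentencePiece ids are shifted by
# fairseq_offset and the first four slots are reserved. The vocab below is a
# tiny fake, not the real model's.
fairseq_special = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
spm_vocab = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}
fairseq_offset = 1

def token_to_id(token):
    if token in fairseq_special:
        return fairseq_special[token]
    spm_id = spm_vocab.get(token, 0)
    # spm id 0 is <unk>, so a falsy id falls back to the fairseq unk slot.
    return spm_id + fairseq_offset if spm_id else fairseq_special["<unk>"]

assert token_to_id(",") == 4   # spm id 3 + offset 1, matching the table
assert token_to_id("<pad>") == 1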
| 213 | 1 |
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Number of structurally distinct binary search trees on `node_count` nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Number of labeled binary trees: Catalan(node_count) * node_count!."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
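# Quick numeric spot-checks of the functions above for small node counts.
assert binomial_coefficient(4, 2) == 6
assert catalan_number(3) == 5        # 5 distinct binary search trees on 3 nodes
assert factorial(3) == 6
assert binary_tree_count(3) == 30    # Catalan(3) * 3! labeled binary trees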
| 385 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
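# A list of length n yields 2**n subsequences (including the empty one); the
# illustrative check below captures stdout to verify that for 4 elements.
import io
from contextlib import redirect_stdout

buffer = io.StringIO()
with redirect_stdout(buffer):
    generate_all_subsequences([3, 1, 2, 4])
assert len(buffer.getvalue().splitlines()) == 2 ** 4  # 16 printed subsequences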
| 385 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
def __init__( self : Optional[int] , __A : Dict , __A : Dict=3 , __A : Dict=7 , __A : List[Any]=True , __A : Tuple=True , __A : Dict=False , __A : int=True , __A : Any=99 , __A : Optional[int]=32 , __A : str=5 , __A : List[str]=4 , __A : Dict=37 , __A : Dict="gelu" , __A : int=0.1 , __A : int=0.1 , __A : Optional[Any]=512 , __A : str=16 , __A : Union[str, Any]=2 , __A : Union[str, Any]=0.0_2 , __A : int=3 , __A : Optional[Any]=4 , __A : str=None , ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
def lowercase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__A , )
def lowercase__ ( self : Optional[Any] , __A : Dict , __A : List[str] , __A : Tuple , __A : List[Any] , __A : Any , __A : Dict , __A : Optional[int] ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = FalconModel(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase__ = model(__A , attention_mask=__A )
lowerCAmelCase__ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Any , __A : List[Any] , __A : Dict , __A : Union[str, Any] , __A : int , __A : Optional[int] , __A : int , __A : Union[str, Any] , __A : str , __A : Dict , ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = True
lowerCAmelCase__ = FalconModel(__A )
model.to(__A )
model.eval()
lowerCAmelCase__ = model(
__A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )
lowerCAmelCase__ = model(
__A , attention_mask=__A , encoder_hidden_states=__A , )
lowerCAmelCase__ = model(__A , attention_mask=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Union[str, Any] , __A : Union[str, Any] , __A : Any , __A : List[str] , __A : Optional[int] , __A : Optional[int] , __A : str , __A : List[str] , __A : Any , __A : List[str] , ) -> str:
'''simple docstring'''
lowerCAmelCase__ = FalconForCausalLM(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase__ = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : int , __A : Optional[Any] , __A : Tuple , __A : Optional[Any] , __A : str , __A : Dict , __A : str , __A : Optional[Any] , __A : Optional[int] , __A : Optional[Any] , ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = FalconForCausalLM(config=__A )
model.to(__A )
model.eval()
# first forward pass
lowerCAmelCase__ = model(
__A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , use_cache=__A , )
lowerCAmelCase__ = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
lowerCAmelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
lowerCAmelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ = model(
__A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , output_hidden_states=__A , )["""hidden_states"""][0]
lowerCAmelCase__ = model(
__A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , past_key_values=__A , output_hidden_states=__A , )["""hidden_states"""][0]
# select random slice
lowerCAmelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''feature-extraction''': FalconModel,
'''text-classification''': FalconForSequenceClassification,
'''text-generation''': FalconForCausalLM,
'''question-answering''': FalconForQuestionAnswering,
'''token-classification''': FalconForTokenClassification,
'''zero-shot''': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = FalconModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__A , hidden_size=37 )
def lowercase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = 3
lowerCAmelCase__ = input_dict["""input_ids"""]
lowerCAmelCase__ = input_ids.ne(1 ).to(__A )
lowerCAmelCase__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ = FalconForSequenceClassification(__A )
model.to(__A )
model.eval()
lowerCAmelCase__ = model(__A , attention_mask=__A , labels=__A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = 3
lowerCAmelCase__ = """single_label_classification"""
lowerCAmelCase__ = input_dict["""input_ids"""]
lowerCAmelCase__ = input_ids.ne(1 ).to(__A )
lowerCAmelCase__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ = FalconForSequenceClassification(__A )
model.to(__A )
model.eval()
lowerCAmelCase__ = model(__A , attention_mask=__A , labels=__A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = input_dict["""input_ids"""]
lowerCAmelCase__ = FalconForCausalLM(__A )
model.to(__A )
model.eval()
lowerCAmelCase__ = model(__A , use_cache=__A )
lowerCAmelCase__ = input_ids.shape[0]
lowerCAmelCase__ = model._convert_to_rw_cache(result.past_key_values )
lowerCAmelCase__ = model._convert_cache_to_standard_format(__A , __A )
for layer in range(len(__A ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = 3
lowerCAmelCase__ = """multi_label_classification"""
lowerCAmelCase__ = input_dict["""input_ids"""]
lowerCAmelCase__ = input_ids.ne(1 ).to(__A )
lowerCAmelCase__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase__ = FalconForSequenceClassification(__A )
model.to(__A )
model.eval()
lowerCAmelCase__ = model(__A , attention_mask=__A , labels=__A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(__A , """use_cache""" ):
return
lowerCAmelCase__ = model_class(__A ).to(__A )
if "use_cache" not in inputs:
lowerCAmelCase__ = True
lowerCAmelCase__ = model(**__A )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowerCAmelCase__ = (
getattr(__A , """decoder_layers""" , __A )
or getattr(__A , """num_decoder_layers""" , __A )
or config.num_hidden_layers
)
lowerCAmelCase__ = getattr(__A , """num_kv_heads""" , config.num_attention_heads )
lowerCAmelCase__ = getattr(__A , """d_model""" , config.hidden_size )
lowerCAmelCase__ = embed_dim // num_attention_heads
lowerCAmelCase__ = outputs["""past_key_values"""]
self.assertEqual(len(__A ) , __A )
lowerCAmelCase__ ,lowerCAmelCase__ = inputs["""input_ids"""].shape
for i in range(__A ):
if config.new_decoder_architecture:
lowerCAmelCase__ = config.num_attention_heads
elif config.multi_query:
lowerCAmelCase__ = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
@slow
def lowercase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
lowerCAmelCase__ = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
model.eval()
model.to(__A )
lowerCAmelCase__ = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__A )
lowerCAmelCase__ = (
"""My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."""
)
lowerCAmelCase__ = model.generate(**__A , do_sample=__A , max_new_tokens=19 )
lowerCAmelCase__ = tokenizer.batch_decode(__A )[0]
self.assertEqual(__A , __A )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowerCAmelCase__ = AutoTokenizer.from_pretrained(__A )
lowerCAmelCase__ = FalconForCausalLM.from_pretrained(__A )
model.eval()
model.to(__A )
lowerCAmelCase__ = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__A )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**__A , do_sample=__A , max_new_tokens=4 )
model.generate(**__A , do_sample=__A , max_new_tokens=4 )
model.generate(**__A , num_beams=2 , max_new_tokens=4 )
@slow
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowerCAmelCase__ = AutoTokenizer.from_pretrained(__A )
lowerCAmelCase__ = FalconForCausalLM.from_pretrained(__A )
model.eval()
model.to(device=__A )
lowerCAmelCase__ = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__A )
# Test results are the same with and without cache
lowerCAmelCase__ = model.generate(**__A , do_sample=__A , max_new_tokens=20 , use_cache=__A )
lowerCAmelCase__ = model.generate(**__A , do_sample=__A , max_new_tokens=20 , use_cache=__A )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
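# The cache test above relies on greedy decoding being deterministic: with
# do_sample=False, generation with and without the KV cache must agree
# token-for-token. A minimal sketch of that check for any causal LM; the tiny
# checkpoint name is illustrative and the snippet assumes torch + transformers
# are installed.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2").eval()
inputs = tok("My favorite food is", return_tensors="pt")
with torch.no_grad():
    with_cache = model.generate(**inputs, do_sample=False, max_new_tokens=8, use_cache=True)
    without_cache = model.generate(**inputs, do_sample=False, max_new_tokens=8, use_cache=False)
assert torch.equal(with_cache, without_cache)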
| 211 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
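# A stripped-down sketch of the trick the line above relies on: the package
# swaps its own entry in sys.modules for a proxy that imports submodules on
# first attribute access. The example mapping is illustrative.
import importlib
import sys
import types

class _LazyProxy(types.ModuleType):
    def __init__(self, name, attr_to_submodule):
        super().__init__(name)
        self._attr_to_submodule = attr_to_submodule

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(attr)
        submodule = importlib.import_module(self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)

# e.g., at the bottom of a package __init__.py:
# sys.modules[__name__] = _LazyProxy(__name__, {"SwiftFormerModel": ".modeling_swiftformer"})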
| 211 | 1 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments ):
'''simple docstring'''
    mp_parameters: str = field(
        default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )
    def __post_init__( self ):
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , A , )
@cached_property
    def _setup_devices( self ):
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
        if self.no_cuda:
            device = torch.device("cpu" )
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda" , local_rank )
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
            torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
            device = torch.device("cuda" , self.local_rank )
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
            device = torch.device("cuda" , self.local_rank )
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device )
        return device
@property
    def world_size( self ):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
    def place_model_on_device( self ):
return not is_sagemaker_model_parallel_available()
@property
    def _no_sync_in_gradient_accumulation( self ):
return False
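# Hypothetical smoke test (not in the original file; the JSON payloads are made
# up): the availability check at the top keys off two SageMaker environment
# variables, and still returns False unless `smdistributed` is importable.
if __name__ == "__main__":
    os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2, "microbatches": 4})
    os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
    print(is_sagemaker_model_parallel_available())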
| 244 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
__SCREAMING_SNAKE_CASE : Optional[Any] = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
__SCREAMING_SNAKE_CASE : Any = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
__SCREAMING_SNAKE_CASE : str = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ (datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
    def _compute( self , predictions , references ):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
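# Quick usage sketch (mirrors the docstring example above; illustrative only):
#
#     squad_metric = datasets.load_metric("squad")
#     predictions = [{"id": "56e10a3be3433e1400422b22", "prediction_text": "1976"}]
#     references = [{"id": "56e10a3be3433e1400422b22", "answers": {"text": ["1976"], "answer_start": [97]}}]
#     squad_metric.compute(predictions=predictions, references=references)
#     # {'exact_match': 100.0, 'f1': 100.0}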
| 244 | 1 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read( bpayload , sampling_rate ) -> np.array:
    '''simple docstring'''
    ar = f"""{sampling_rate}"""
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile' )
    return audio
def ffmpeg_microphone( sampling_rate , chunk_length_s , format_for_conversion = "f32le" , ):
    '''simple docstring'''
    ar = f"""{sampling_rate}"""
    ac = '1'
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
    system = platform.system()
    if system == "Linux":
        format_ = 'alsa'
        input_ = 'default'
    elif system == "Darwin":
        format_ = 'avfoundation'
        input_ = ':0'
    elif system == "Windows":
        format_ = 'dshow'
        input_ = 'default'
    ffmpeg_command = [
        'ffmpeg',
        '-f',
        format_,
        '-i',
        input_,
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-fflags',
        'nobuffer',
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command , chunk_len )
    for item in iterator:
        yield item
def ffmpeg_microphone_live( sampling_rate , chunk_length_s , stream_chunk_s = None , stride_length_s = None , format_for_conversion = "f32le" , ):
    '''simple docstring'''
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s , (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
        # Put everything back in numpy scale
        item['raw'] = np.frombuffer(item['raw'] , dtype=dtype )
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter( iterator , chunk_len , stride , stream = False ):
    '''simple docstring'''
    acc = b''
    stride_left , stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
def _ffmpeg_stream( ffmpeg_command , buflen ):
    '''simple docstring'''
    bufsize = 2**24 # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
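# Minimal sketch (not part of the module): `chunk_bytes_iter` can be exercised
# without ffmpeg by feeding it an in-memory byte stream; with chunk_len=4 and a
# (1, 1) stride, consecutive chunks overlap by one byte on each side.
if __name__ == "__main__":
    for demo_item in chunk_bytes_iter(iter([b"abcd", b"efgh"]) , 4 , stride=(1, 1) ):
        print(demo_item)
    # {'raw': b'abcd', 'stride': (0, 1)}
    # {'raw': b'cdef', 'stride': (1, 1)}
    # {'raw': b'efgh', 'stride': (1, 1)}
    # {'raw': b'gh', 'stride': (1, 0)}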
| 179 |
'''simple docstring'''
def logical_left_shift( number , shift_amount ) -> str:
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift( number , shift_amount ) -> str:
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift( number , shift_amount ) -> str:
    '''simple docstring'''
    if number >= 0: # Get binary representation of positive number
        binary_number = '0' + str(bin(number ) ).strip('-' )[2:]
    else: # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] ) # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
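# Worked examples (illustrative; the values follow directly from the helpers above):
#   logical_left_shift(5, 2)      == "0b10100"  # 5  << 2 -> 20
#   logical_right_shift(20, 2)    == "0b101"    # 20 >> 2 -> 5
#   arithmetic_right_shift(-8, 2) == "0b11110"  # -8 >> 2 -> -2 in 5-bit two's complement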
| 179 | 1 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path( test_file ):
    """simple docstring"""
    components = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f'''{test_file} instead.''' )
    test_fn = components[-1]
    if not test_fn.endswith("py" ):
        raise ValueError(f'''`test_file` should be a python file. Got {test_fn} instead.''' )
    if not test_fn.startswith("test_modeling_" ):
        raise ValueError(
            f'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
    components = components[:-1] + [test_fn.replace(".py" , "" )]
    test_module_path = ".".join(components )
    return test_module_path


def get_test_module( test_file ):
    """simple docstring"""
    test_module_path = get_module_path(test_file )
    test_module = importlib.import_module(test_module_path )
    return test_module


def get_tester_classes( test_file ):
    """simple docstring"""
    tester_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith("ModelTester" ):
            tester_classes.append(getattr(test_module , attr ) )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )


def get_test_classes( test_file ):
    """simple docstring"""
    test_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        attr_value = getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value , "all_model_classes" , [] )
        if len(model_classes ) > 0:
            test_classes.append(attr_value )
    # sort with class names
    return sorted(test_classes , key=lambda x : x.__name__ )


def get_model_classes( test_file ):
    """simple docstring"""
    test_classes = get_test_classes(test_file )
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes , key=lambda x : x.__name__ )


def get_model_tester_from_test_class( test_class ):
    """simple docstring"""
    test = test_class()
    if hasattr(test , "setUp" ):
        test.setUp()
    model_tester = None
    if hasattr(test , "model_tester" ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model( test_file , model_class ):
    """simple docstring"""
    test_classes = get_test_classes(test_file )
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes , key=lambda x : x.__name__ )


def get_tester_classes_for_model( test_file , model_class ):
    """simple docstring"""
    test_classes = get_test_classes_for_model(test_file , model_class )
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )


def get_test_to_tester_mapping( test_file ):
    """simple docstring"""
    test_classes = get_test_classes(test_file )
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping( test_file ):
    """simple docstring"""
    model_classes = get_model_classes(test_file )
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping( test_file ):
    """simple docstring"""
    model_classes = get_model_classes(test_file )
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json( o ):
    """simple docstring"""
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
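# Hypothetical usage (requires a transformers checkout; the path below is made up):
#
#     test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
#     get_module_path(test_file)            # -> "tests.models.bert.test_modeling_bert"
#     get_model_to_test_mapping(test_file)  # model class -> test classes covering it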
| 610 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 610 | 1 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
'''simple docstring'''
    def __init__( self ):
        '''simple docstring'''
        self.node_position = []
    def get_position( self , vertex ):
        '''simple docstring'''
        return self.node_position[vertex]
    def set_position( self , vertex , pos ):
        '''simple docstring'''
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ):
        '''simple docstring'''
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp , tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child] , positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start] , positions[start] = temp, tempa
            temp = self.get_position(positions[smallest_child] )
            self.set_position(
                positions[smallest_child] , self.get_position(positions[start] ) )
            self.set_position(positions[start] , temp )
            self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ):
        '''simple docstring'''
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ):
        '''simple docstring'''
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ):
        '''simple docstring'''
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ) -> list:
    '''simple docstring'''
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list ) # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = [] # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input('''Enter number of edges: ''').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
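    # Worked example: for a triangle graph entered at the prompts as
    #     3
    #     0 1 1
    #     1 2 2
    #     0 2 3
    # the algorithm returns the MST edge list [(0, 1), (1, 2)] (total weight 3).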
| 237 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
lowerCamelCase : Optional[Any] ={
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "tapas"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10] , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , positive_label_weight=10.0 , num_aggregation_labels=0 , aggregation_loss_weight=1.0 , use_answer_as_supervision=None , answer_loss_importance=1.0 , use_normalized_answer_loss=False , huber_loss_delta=None , temperature=1.0 , aggregation_temperature=1.0 , use_gumbel_for_cells=False , use_gumbel_for_aggregation=False , average_approximation_function="ratio" , cell_selection_preference=None , answer_loss_cutoff=None , max_num_rows=64 , max_num_columns=32 , average_logits_per_cell=False , select_one_column=True , allow_empty_column_selection=False , init_cell_selection_weights_to_zero=False , reset_position_index_per_cell=True , disable_per_token_loss=False , aggregation_labels=None , no_aggregation_label_index=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
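# Minimal usage sketch (illustrative): defaults reproduce the base setup; the
# fine-tuning hyperparameters above can be overridden at construction time.
#
#     config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
#     config.model_type  # "tapas"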
| 237 | 1 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __lowercase ( unittest.TestCase ):
    def assertListAlmostEqual( self , list_a , list_b , tol) -> Dict:
        self.assertEqual(len(list_a) , len(list_b))
        for a, b in zip(list_a , list_b):
            self.assertAlmostEqual(a , b , delta=tol)
    def test_gradient_accumulator( self) -> int:
        accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0])])
accumulator([tf.constant([-2.0, 1.0])])
accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
self.assertEqual(accumulator.step , 3)
self.assertEqual(len(accumulator.gradients) , 1)
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2)
accumulator.reset()
self.assertEqual(accumulator.step , 0)
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2)
    def test_gradient_accumulator_distribution( self) -> Any:
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('CPU')
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()])
        devices = tf.config.list_logical_devices(device_type='CPU')
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer , _ = create_optimizer(5e-5 , 1_0 , 5)
            gradient_placeholder = tf.Variable([0.0, 0.0] , trainable=False)
        def accumulate_on_replica(gradient):
accumulator([gradient])
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable])))
@tf.function
        def accumulate(grada , gradb):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grada)
                local_variables[1].assign(gradb)
                strategy.run(accumulate_on_replica , args=(gradient_placeholder,))
@tf.function
def apply_grad():
with strategy.scope():
                strategy.run(apply_on_replica)
        def _check_local_values(expecteda , expectedb):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value() , expecteda , tol=1e-2)
            self.assertListAlmostEqual(values[1].value() , expectedb , tol=1e-2)
accumulate([1.0, 2.0] , [-1.0, 1.0])
accumulate([3.0, -1.0] , [-1.0, -1.0])
accumulate([-2.0, 2.0] , [3.0, -2.0])
self.assertEqual(accumulator.step , 3)
_check_local_values([2.0, 3.0] , [1.0, -2.0])
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2)
accumulator.reset()
self.assertEqual(accumulator.step , 0)
_check_local_values([0.0, 0.0] , [0.0, 0.0])
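# Minimal standalone sketch (not part of the test class): accumulate per-step
# gradients, read back their running sum, then reset, as the first test asserts.
#
#     accumulator = GradientAccumulator()
#     accumulator([tf.constant([1.0, 2.0])])
#     accumulator([tf.constant([-2.0, 1.0])])
#     accumulator.gradients[0].numpy()  # array([-1., 3.], dtype=float32)
#     accumulator.reset()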
| 313 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge :
    def __init__( self , short_edge_length , max_size=sys.maxsize) -> Optional[int]:
        self.interp_method = 'bilinear'
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__( self , imgs) -> Optional[int]:
        img_augs = []
        for img in imgs:
            h , w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h , w)
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size
            if max(newh , neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh , neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2 , 0 , 1).unsqueeze(0) # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img , (newh, neww) , mode=self.interp_method , align_corners=False).squeeze(0)
            img_augs.append(img)
return img_augs
class Preprocess :
    def __init__( self , cfg) -> Optional[int]:
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad( self , images) -> Optional[int]:
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__( self , images , single_image=False) -> List[str]:
        with torch.no_grad():
            if not isinstance(images , list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i] , torch.Tensor):
                    images.insert(i , images.pop(i).to(self.device).float())
                elif not isinstance(images[i] , torch.Tensor):
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i) , input_format=self.input_format))
                        .to(self.device)
                        .float() , )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images , sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes , sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box( boxes , scale_yx ) -> Any:
    '''simple docstring'''
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box( tensor , box_size ) -> Dict:
    '''simple docstring'''
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h , w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h )
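# Illustrative numbers for the two box helpers above: scale one (x0, y0, x1, y1)
# box by per-image (y, x) factors, then clamp it in place to a 100x100 canvas.
#
#     boxes = torch.tensor([[10.0, 20.0, 250.0, 80.0]])
#     scales_yx = torch.tensor([[1.0, 0.5]])
#     _scale_box(boxes, scales_yx)   # x coords halved -> [[5., 20., 125., 80.]]
#     _clip_box(boxes, (100, 100))   # clamps in place -> [[5., 20., 100., 80.]]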
| 313 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: List[Any] = logging.get_logger(__name__)
A__: str = {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config( PretrainedConfig ):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=10000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
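# Minimal usage sketch (illustrative): instantiate the decoder config with a
# couple of overridden sizes; unspecified fields keep the defaults above.
#
#     config = Speech2Text2Config(d_model=512, decoder_layers=4)
#     config.hidden_size  # 512, resolved through the attribute_map alias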
| 506 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 506 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=2_24 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , )-> Optional[int]:
        '''simple docstring'''
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self )-> Dict:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessingTest( ImageProcessingSavingTestMixin ,unittest.TestCase ):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp( self )-> Optional[Any]:
        '''simple docstring'''
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
    def image_processor_dict( self )-> Any:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties( self )-> str:
'''simple docstring'''
A__ : Any =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''size''' ) )
    def test_batch_feature( self )-> int:
'''simple docstring'''
pass
    def test_call_pil( self )-> Optional[int]:
'''simple docstring'''
A__ : int =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ : Dict =prepare_image_inputs(self.image_proc_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
A__ : str =image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A__ : Union[str, Any] =image_processor(__UpperCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
    def test_call_numpy( self )-> Any:
'''simple docstring'''
A__ : str =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ : Union[str, Any] =prepare_image_inputs(self.image_proc_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
A__ : str =image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A__ : Dict =image_processor(__UpperCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
    def test_call_pytorch( self )-> Dict:
'''simple docstring'''
A__ : Dict =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ : Optional[int] =prepare_image_inputs(self.image_proc_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
A__ : int =image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A__ : List[Any] =image_processor(__UpperCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
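# Quick standalone sketch (not part of the test suite): exercise the processor
# the same way the three `test_call_*` methods above do, on one random PIL image.
#
#     tester = EfficientFormerImageProcessorTester(None)
#     processor = ViTImageProcessor(**tester.prepare_image_processor_dict())
#     image = Image.fromarray(np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8))
#     processor(image, return_tensors="pt").pixel_values.shape  # (1, 3, 18, 18)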
| 416 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer( BaseTokenizer ):
"""simple docstring"""
    def __init__( self , replacement = "▁" , add_prefix_space = True , unk_token = "<unk>" , eos_token = "</s>" , pad_token = "<pad>" , ) -> str:
        '''simple docstring'''
        self.special_tokens = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['id']] = token_dict['token']
        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}') , ' '),
                normalizers.Lowercase(),
            ])
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ])
        tokenizer.decoder = decoders.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=F'$A {self.special_tokens["eos"]["token"]}' , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
        parameters = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }
        super().__init__(tokenizer , parameters)
    def train( self , files , vocab_size = 8000 , show_progress = True , ) -> Any:
        '''simple docstring'''
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        if isinstance(files , str):
            files = [files]
        self._tokenizer.train(files , trainer=trainer)
        self.add_unk_id()
    def train_from_iterator( self , iterator , vocab_size = 8000 , show_progress = True , ) -> Dict:
        '''simple docstring'''
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        self._tokenizer.train_from_iterator(iterator , trainer=trainer)
        self.add_unk_id()
    def add_unk_id( self) -> Tuple:
        '''simple docstring'''
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json['model']['unk_id'] = self.special_tokens['unk']['id']
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
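# Hypothetical training sketch (file names are made up): train the Unigram model
# on plain-text corpora, then persist the tokenizer JSON.
#
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train(["corpus_a.txt", "corpus_b.txt"], vocab_size=8000)
#     tokenizer.save("unigram-tokenizer.json")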
| 302 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char( cp ) -> bool:
'''simple docstring'''
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
def is_chinese( word ) -> int:
    '''simple docstring'''
    for char in word:
        char = ord(char )
        if not _is_chinese_char(char ):
            return 0
    return 1
def get_chinese_word( tokens ) -> list:
    '''simple docstring'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol( bert_tokens , chinese_word_set ) -> list:
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start , end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            l = min(end - start , max_word_len )  # noqa: E741
            for i in range(l , 1 , -1 ):
                whole_word = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref( lines , ltp_tokenizer , bert_tokenizer ) -> list:
    '''simple docstring'''
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["cws"] ).cws
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res["input_ids"] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main( args ) -> None:
    '''simple docstring'''
    with open(args.file_name , "r" , encoding="utf-8" ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp ) # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , "w" , encoding="utf-8" ) as f:
        data = [json.dumps(ref ) + "\n" for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
A : str = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
A : int = parser.parse_args()
main(args)
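# Example invocation (paths are illustrative; they mirror the argparse defaults):
#
#     python prepare_chinese_ref.py \
#         --file_name ./resources/chinese-demo.txt \
#         --ltp ./resources/ltp \
#         --bert ./resources/robert \
#         --save_path ./resources/ref.txt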
| 473 |
from sklearn.metrics import mean_squared_error
import datasets
A : List[Any] = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
A : Optional[int] = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
A : str = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase( datasets.Metric ):
    def _info( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
    def _get_feature_types( self ) -> Dict:
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ) -> Tuple:
        '''simple docstring'''
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse}
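# Quick usage sketch (mirrors the docstring example above; illustrative only):
#
#     mse_metric = datasets.load_metric("mse")
#     mse_metric.compute(predictions=[2.5, 0.0, 2, 8], references=[3, -0.5, 2, 7])
#     # {'mse': 0.375}; pass squared=False to get RMSE instead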
| 473 | 1 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x ):  # picklable for multiprocessing
    '''simple docstring'''
    return x.sum()
def add_one(i ):  # picklable for multiprocessing
    '''simple docstring'''
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        sd = {}
        sl = []
        s1 = 1
        s2 = [1, 2]
        s3 = {"a": 1, "b": 2}
        s4 = {"a": [1, 2], "b": [3, 4]}
        s5 = {"a": {"1": 1}, "b": 2}
        s6 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_sd = {}
        expected_map_nested_sl = []
        expected_map_nested_s1 = 2
        expected_map_nested_s2 = [2, 3]
        expected_map_nested_s3 = {"a": 2, "b": 3}
        expected_map_nested_s4 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s5 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s6 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, sd), expected_map_nested_sd)
        self.assertEqual(map_nested(add_one, sl), expected_map_nested_sl)
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        num_proc = 2
        self.assertEqual(map_nested(add_one, sd, num_proc=num_proc), expected_map_nested_sd)
        self.assertEqual(map_nested(add_one, sl, num_proc=num_proc), expected_map_nested_sl)
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn1_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()}, {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()}, )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()}, {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()}, )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data" , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()
def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
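# Added recap of the helper under test: map_nested applies a function to every leaf
# of an arbitrarily nested structure, e.g.
#   map_nested(lambda v: v + 1, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}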
| 18 |
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    return sum(map(int, str(factorial(num))))
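# Worked example (added): factorial(10) == 3628800 and 3+6+2+8+8+0+0 == 27,
# so solution(10) == 27. The answer for n == 100 is widely published as 648.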
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 319 | 0 |
'''simple docstring'''
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
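# Cross-check (added): the problem statement's four-digit analogue, the greatest
# product of four adjacent digits in this series, is 9 * 9 * 8 * 9 = 5832, which is
# a handy sanity check when changing the window size from 13.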
if __name__ == "__main__":
print(f'''{solution() = }''')
| 710 | '''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
_lowercase_uppercase_re = re.compile(r"""([a-z\d])([A-Z])""")
_single_underscore_re = re.compile(r"""(?<!_)_(?!_)""")
_multiple_underscores_re = re.compile(r"""(_{2,})""")
_split_re = r"""^\w+(\.\w+)*$"""
_INVALID_WINDOWS_CHARACTERS_IN_PATH = r"""<>:/\|?*"""
def camelcase_to_snakecase(name: str) -> str:
    name = _uppercase_uppercase_re.sub(r"""\1_\2""", name)
    name = _lowercase_uppercase_re.sub(r"""\1_\2""", name)
    return name.lower()
def snakecase_to_camelcase(name: str) -> str:
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"""Should be a dataset name, not a path: {name}""")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name: str, split: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"""Should be a dataset name, not a path: {name}""")
    if not re.match(_split_re, split):
        raise ValueError(f"""Split name should match '{_split_re}' but got '{split}'.""")
    return f"""{filename_prefix_for_name(name)}-{split}"""
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f""".{filetype_suffix}"""
    filepath = os.path.join(data_dir, prefix)
    return f"""{filepath}*"""
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f""".{filetype_suffix}""" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f""".{filetype_suffix}"""
        return [filename]
| 603 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    """simple docstring"""
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
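# Example of the renaming above, derived from KEYS_TO_MODIFY_MAPPING:
#   "image_encoder.blocks.0.norm1.weight" -> "vision_encoder.layers.0.layer_norm1.weight"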
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    """simple docstring"""
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
        config = SamConfig(
            vision_config=vision_config, )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
        config = SamConfig(
            vision_config=vision_config, )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_8902_5115_9668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712_6030_9219_3604

        input_boxes = ((75, 275, 1_725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686_0156_0592_6514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
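# Example invocation (illustrative; the script filename here is an assumption):
#   python convert_sam_to_hf.py --model_name sam_vit_b_01ec64 --pytorch_dump_folder_path ./sam-vit-base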
| 27 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
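# Added note: with the `_LazyModule` pattern above, importing this package is cheap;
# the heavy torch/TF/Flax submodules are only imported on first attribute access,
# e.g. `from transformers.models.blenderbot import BlenderbotForConditionalGeneration`.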
| 27 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
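# Minimal usage sketch (added; downloads the BLIP checkpoint on first use, and the
# image path is a placeholder):
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   caption = tool(Image.open("photo.jpg"))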
| 715 |
'''simple docstring'''
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
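# Example (added): prime_sieve(10) == [2, 3, 5, 7]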
def solution(limit: int = 99_99_66_66_33_33) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 1_00
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
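# Added context: this implements Project Euler 234 ("semidivisible numbers"). For
# n = 10, sqrt(10) is about 3.16, so lps(10) = 3 and ups(10) = 5; 10 is divisible by
# ups but not by lps, so it is semidivisible and contributes to the sum.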
if __name__ == "__main__":
print(solution())
| 511 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1_005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_000, beta_schedule="linear", beta_start=0.0_0085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0  # NOTE: the exact masked slice was garbled in the source; this region is an assumption
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(F"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0  # NOTE: the exact masked slice was garbled in the source; reconstructed here
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            prompt, image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 258 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_base_extractors(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file, ):
    """simple docstring"""
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_extractor(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file, ):
    """simple docstring"""
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    """simple docstring"""
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    """simple docstring"""
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog ):
    """simple docstring"""
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    """simple docstring"""
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
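# Added note: the crafted PNG payload above happens to contain the ZIP
# end-of-central-directory marker b"PK\x05\x06" inside its image data, which is what
# fools `zipfile.is_zipfile`; checking the file's leading magic number instead, as
# the extractor does, avoids this false positive.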
| 258 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1_024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
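# Example invocation (illustrative; the script filename is an assumption):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 1024 \
#       --data_dir ./cnn_dm --save_path ./cnn_dm_packed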
| 703 | import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True, ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE__: Dict= [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
SCREAMING_SNAKE_CASE__: List[Any]= [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 
788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
        # NOTE: the variable names of the two reference lists above were lost in the
        # source; read the first as `expected_words` and the second as `expected_boxes`:
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
# with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 107 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv2)
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module , FlaxModelMixin , ConfigMixin ):
    '''simple docstring'''
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self , rng : jax.random.KeyArray ) -> FrozenDict:
        '''simple docstring'''
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape , dtype=jnp.float32 )
        params_rng, dropout_rng = jax.random.split(rng )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states , controlnet_cond )["params"]
    def setup(self ) -> None:
        '''simple docstring'''
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(controlnet_block )
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
            for _ in range(self.layers_per_block ):
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=mid_block_channel , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__(self , sample , timesteps , encoder_hidden_states , controlnet_cond , conditioning_scale : float = 1.0 , return_dict : bool = True , train : bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
        '''simple docstring'''
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond , axis=1 )
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        controlnet_cond = jnp.transpose(controlnet_cond , (0, 2, 3, 1) )
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond )
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlockaD ):
                sample, res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample, res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples , self.controlnet_down_blocks ):
            down_block_res_sample = controlnet_block(down_block_res_sample )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample )
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples , mid_block_res_sample=mid_block_res_sample )
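
# A minimal usage sketch for the model above (not part of the original file; the
# shapes follow init_weights, and the call is commented out because it requires the
# real down/mid block implementations to be importable):
# controlnet = FlaxControlNetModel(sample_size=32)
# params = controlnet.init_weights(jax.random.PRNGKey(0))
# outputs = controlnet.apply(
#     {"params": params},
#     jnp.zeros((1, 4, 32, 32)),    # latent sample
#     jnp.ones((1,), jnp.int32),    # timesteps
#     jnp.zeros((1, 1, 1280)),      # encoder hidden states
#     jnp.zeros((1, 3, 256, 256)),  # conditioning image at 8x latent resolution
# )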
| 87 |
'''simple docstring'''
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
lowercase__ = get_logger(__name__)
class VerificationMode(enum.Enum ):
    """simple docstring"""
    ALL_CHECKS = """all_checks"""
    BASIC_CHECKS = """basic_checks"""
    NO_CHECKS = """no_checks"""


class ChecksumVerificationException(Exception ):
    """simple docstring"""


class UnexpectedDownloadedFile(ChecksumVerificationException ):
    """simple docstring"""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException ):
    """simple docstring"""


class NonMatchingChecksumError(ChecksumVerificationException ):
    """simple docstring"""
def verify_checksums(expected_checksums , recorded_checksums , verification_name=None ):
    '''simple docstring'''
    if expected_checksums is None:
        logger.info('''Unable to verify checksums.''' )
        return
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = ''' for ''' + verification_name if verification_name is not None else ''''''
    if len(bad_urls ) > 0:
        raise NonMatchingChecksumError(
            F'Checksums didn\'t match{for_verification_name}:\n'
            F'{bad_urls}\n'
            '''Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error''' )
    logger.info('''All the checksums matched successfully''' + for_verification_name )
class SplitsVerificationException(Exception ):
    """simple docstring"""


class UnexpectedSplits(SplitsVerificationException ):
    """simple docstring"""


class ExpectedMoreSplits(SplitsVerificationException ):
    """simple docstring"""


class NonMatchingSplitsSizesError(SplitsVerificationException ):
    """simple docstring"""
def verify_splits(expected_splits , recorded_splits ):
    '''simple docstring'''
    if expected_splits is None:
        logger.info('''Unable to verify splits sizes.''' )
        return
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    bad_splits = [
        {'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info('''All the splits matched successfully.''' )
def get_size_checksum_dict(path , record_checksum = True ) -> dict:
    '''simple docstring'''
    if record_checksum:
        m = sha256()
        with open(path , '''rb''' ) as f:
            # hash the file in 1 MiB chunks so that large files do not need to fit in memory
            for chunk in iter(lambda: f.read(1 << 20 ) , B'''''' ):
                m.update(chunk )
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path ), "checksum": checksum}
def is_small_dataset(dataset_size ):
'''simple docstring'''
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
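
# Illustration (not part of the original file): with config.IN_MEMORY_MAX_SIZE set
# to 250 MiB, a 100 MiB dataset fits in memory and a 1 GiB one does not.
# is_small_dataset(100 * 2**20)  # -> True  when IN_MEMORY_MAX_SIZE = 250 * 2**20
# is_small_dataset(2**30)        # -> False under the same setting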
| 638 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """.""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 1.0,
            """sag_scale""": 1.0,
            """output_type""": """numpy""",
        }
        return inputs
    def test_inference_batch_single_identical(self ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase ):
    '''simple docstring'''
    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = """."""
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
    def test_stable_diffusion_2(self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = """."""
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
    def test_stable_diffusion_2_non_square(self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = """."""
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , width=768 , height=512 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
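
# Hedged end-to-end sketch of the pattern these slow tests exercise (not part of
# the original file; commented out because it downloads a model and needs a GPU,
# and the prompt is illustrative):
# pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
# pipe = pipe.to("cuda")
# image = pipe("a photo of an astronaut", sag_scale=0.75, guidance_scale=7.5).images[0]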
| 351 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
UpperCAmelCase = """▁"""
class AlbertTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self , save_directory , filename_prefix = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
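
# Illustration of the token-type layout produced by create_token_type_ids_from_sequences
# for a sentence pair (not part of the original file):
#   tokens:   [CLS] a1 a2 [SEP] b1 b2 [SEP]
#   type ids:   0   0  0    0   1  1    1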
| 351 | 1 |
from math import pow
def __lowercase ( a__ , a__ , a__ , a__ , a__ , ) -> tuple[int, int]:
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
__SCREAMING_SNAKE_CASE = int(pow(lowerCamelCase__ , lowerCamelCase__ ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
__SCREAMING_SNAKE_CASE = backtrack(
lowerCamelCase__ , lowerCamelCase__ , current_number + 1 , lowerCamelCase__ , lowerCamelCase__ )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
__SCREAMING_SNAKE_CASE = backtrack(
lowerCamelCase__ , lowerCamelCase__ , current_number + 1 , lowerCamelCase__ , lowerCamelCase__ )
return current_sum, solutions_count
def __lowercase ( a__ , a__ ) -> int:
if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10):
raise ValueError(
'Invalid input\n'
'needed_sum must be between 1 and 1000, power between 2 and 10.' )
return backtrack(lowerCamelCase__ , lowerCamelCase__ , 1 , 0 , 0 )[1] # Return the solutions_count
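
# Worked example (not part of the original file): 13 = 2**2 + 3**2 is the only way
# to write 13 as a sum of distinct squares, so the solver counts one solution.
assert solve(13 , 2 ) == 1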
if __name__ == "__main__":
import doctest
doctest.testmod()
| 148 |
"""simple docstring"""
import math
import qiskit
def quantum_full_adder(input_a : int = 1 , input_b : int = 1 , carry_in : int = 1 ) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError("inputs must be integers." )

    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive." )

    if (
        (math.floor(input_a ) != input_a )
        or (math.floor(input_b ) != input_b )
        or (math.floor(carry_in ) != carry_in )
    ):
        raise ValueError("inputs must be exact integers." )

    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2." )

    # build registers
    quantum_r = qiskit.QuantumRegister(4 , "qr" )
    classic_r = qiskit.ClassicalRegister(2 , "cr" )
    # list the entries
    entry = [input_a, input_b, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(quantum_r , classic_r )

    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )

    quantum_circuit.measure([2, 3] , classic_r )  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator" )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )

    return job.result().get_counts(quantum_circuit )
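
# Classical cross-check of the circuit above (not part of the original file):
def _classical_full_adder(a , b , carry_in ):
    # Returns (carry_out, sum_bit) for one-bit inputs.
    total = a + b + carry_in
    return total // 2, total % 2

assert _classical_full_adder(1 , 1 , 1 ) == (1, 1)  # three ones -> carry 1, sum 1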
if __name__ == "__main__":
print(f'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 153 | 0 |
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec('fairseq') is not None

REQUIRE_TRANSFORMERS = {"code_eval"}
_has_transformers = importlib.util.find_spec('transformers') is not None

_on_windows = os.name == "nt"
UNSUPPORTED_ON_WINDOWS = {"bertscore", "frugalscore", "perplexity"}


def skip_if_metric_requires_fairseq(test_case ):
    @wraps(test_case )
    def wrapper(self ,metric_name ):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('''"test requires Fairseq"''' )
        else:
            test_case(self ,metric_name )
    return wrapper


def skip_if_metric_requires_transformers(test_case ):
    @wraps(test_case )
    def wrapper(self ,metric_name ):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('''"test requires transformers"''' )
        else:
            test_case(self ,metric_name )
    return wrapper


def skip_on_windows(test_case ):
    @wraps(test_case )
    def wrapper(self ,metric_name ):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('''"test not supported on Windows"''' )
        else:
            test_case(self ,metric_name )
    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq , skip_if_metric_requires_transformers , skip_on_windows )
@local
class LocalMetricTest(parameterized.TestCase ):
    """simple docstring"""
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = "[...]"
SCREAMING_SNAKE_CASE__ : List[Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , SCREAMING_SNAKE_CASE__ ) ).module_path )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=SCREAMING_SNAKE_CASE__ )
# check parameters
SCREAMING_SNAKE_CASE__ : Optional[int] = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(SCREAMING_SNAKE_CASE__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
SCREAMING_SNAKE_CASE__ : Tuple = doctest.testmod(SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , raise_on_error=SCREAMING_SNAKE_CASE__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = "[...]"
SCREAMING_SNAKE_CASE__ : Optional[Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , SCREAMING_SNAKE_CASE__ ) ).module_path )
# run doctest
with self.use_local_metrics():
SCREAMING_SNAKE_CASE__ : str = doctest.testmod(SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , raise_on_error=SCREAMING_SNAKE_CASE__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](SCREAMING_SNAKE_CASE__ ):
yield
else:
yield
@contextmanager
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
def load_local_metric(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
return load_metric(os.path.join("""metrics""" , SCREAMING_SNAKE_CASE__ ) , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
with patch("""datasets.load_metric""" ) as mock_load_metric:
SCREAMING_SNAKE_CASE__ : Any = load_local_metric
yield
@classmethod
def __magic_name__ (cls , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
def wrapper(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = contextmanager(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def lowercase_ ( _snake_case ):
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""" ,"""""" ,"""""" ) # handle pytest cli flags
    class MockedPredictor(Predictor ):
"""simple docstring"""
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
assert len(input_dict["""input_ids"""] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def lowercase_ ( _snake_case ):
import torch
    def bert_cos_score_idf(model ,refs ,*args ,**kwargs ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs ) )
    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("""bert_score.scorer.get_model""" ), patch(
        """bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def lowercase_ ( _snake_case ):
def load_from_checkpoint(_snake_case ):
        class Model:
            """simple docstring"""
            def predict(self , data , *args , **kwargs ):
                """simple docstring"""
                assert len(data ) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores ) / len(scores )
return Model()
    # mock download_model and load_from_checkpoint, which would otherwise download a comet model
with patch("""comet.download_model""" ) as mock_download_model:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = load_from_checkpoint
yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("""metrics""" ,"""seqeval""" ) )
    wrong_scheme = "ERROR"
    error_message = f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
    with pytest.raises(ValueError ,match=re.escape(error_message ) ):
        metric.compute(predictions=[] ,references=[] ,scheme=wrong_scheme )
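
# The register_intensive_calls_patcher mechanism above in miniature (standalone
# illustration, not part of the test suite): a registry of named context managers
# used to stub out expensive calls before running a metric's doctests.
_DEMO_PATCHERS = {}

def _register_demo_patcher(name ):
    def _wrapper(fn ):
        _DEMO_PATCHERS[name] = contextmanager(fn )
        return fn
    return _wrapper

@_register_demo_patcher("demo" )
def _demo_patcher(module_name ):
    yield  # mocks would be installed here

with _DEMO_PATCHERS["demo"]("some.module" ):
    pass  # the patched doctest would run here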
| 713 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = '''unispeech'''
    def __init__(self , vocab_size=32 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_28 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=3_20 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_00 , codevector_dim=2_56 , proj_codevector_dim=2_56 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_56 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
# ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
    @property
    def inputs_to_logits_ratio(self ):
        """simple docstring"""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
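
# Sanity check of inputs_to_logits_ratio (not part of the original file): with the
# default conv_stride (5, 2, 2, 2, 2, 2, 2) the feature extractor downsamples raw
# audio by functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320 samples
# per output frame.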
| 545 | 0 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
lowerCamelCase__ : Union[str, Any] = logging.getLogger(__name__)
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "summarization"
lowercase_ = ["loss"]
lowercase_ = ROUGE_KEYS
lowercase_ = "rouge2"
def __init__( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Optional[Any] ):
if hparams.sortish_sampler and hparams.gpus > 1:
SCREAMING_SNAKE_CASE_ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(_lowerCAmelCase , num_labels=_lowerCAmelCase , mode=self.mode , **_lowerCAmelCase )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / 'metrics.json'
SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = defaultdict(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.config.model_type
SCREAMING_SNAKE_CASE_ = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
SCREAMING_SNAKE_CASE_ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
SCREAMING_SNAKE_CASE_ = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
SCREAMING_SNAKE_CASE_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
SCREAMING_SNAKE_CASE_ = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], F"target_lens: {self.target_lens}"
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
SCREAMING_SNAKE_CASE_ = get_git_info()['repo_sha']
SCREAMING_SNAKE_CASE_ = hparams.num_workers
SCREAMING_SNAKE_CASE_ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
SCREAMING_SNAKE_CASE_ = self.decoder_start_token_id
SCREAMING_SNAKE_CASE_ = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
SCREAMING_SNAKE_CASE_ = self.hparams.eval_max_gen_length
else:
SCREAMING_SNAKE_CASE_ = self.model.config.max_length
SCREAMING_SNAKE_CASE_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : Dict[str, torch.Tensor] ):
SCREAMING_SNAKE_CASE_ = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(_lowerCAmelCase , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
SCREAMING_SNAKE_CASE_ = True
return readable_batch
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[int] ):
return self.model(_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : List[int] ):
SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
return lmap(str.strip , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : dict ):
SCREAMING_SNAKE_CASE_ = self.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = batch['input_ids'], batch['attention_mask']
SCREAMING_SNAKE_CASE_ = batch['labels']
if isinstance(self.model , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = self.model._shift_right(_lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ = shift_tokens_right(_lowerCAmelCase , _lowerCAmelCase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
SCREAMING_SNAKE_CASE_ = decoder_input_ids
self.save_readable_batch(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self(_lowerCAmelCase , attention_mask=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase , use_cache=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
SCREAMING_SNAKE_CASE_ = nn.CrossEntropyLoss(ignore_index=_lowerCAmelCase )
assert lm_logits.shape[-1] == self.vocab_size
SCREAMING_SNAKE_CASE_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(_lowerCAmelCase , dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = label_smoothed_nll_loss(
_lowerCAmelCase , _lowerCAmelCase , self.hparams.label_smoothing , ignore_index=_lowerCAmelCase )
return (loss,)
@property
def lowerCAmelCase_ ( self : Any ):
return self.tokenizer.pad_token_id
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = self._step(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _lowerCAmelCase ) )
# tokens per batch
SCREAMING_SNAKE_CASE_ = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE_ = batch['input_ids'].shape[0]
SCREAMING_SNAKE_CASE_ = batch['input_ids'].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE_ = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int ):
return self._generative_step(_lowerCAmelCase )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any]="val" ):
self.step_count += 1
SCREAMING_SNAKE_CASE_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE_ = losses['loss']
SCREAMING_SNAKE_CASE_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
SCREAMING_SNAKE_CASE_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE_ = torch.tensor(_lowerCAmelCase ).type_as(_lowerCAmelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {F"{prefix}_avg_{k}": x for k, x in losses.items()}
SCREAMING_SNAKE_CASE_ = self.step_count
self.metrics[prefix].append(_lowerCAmelCase ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE_ = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F"{prefix}_loss": loss,
F"{prefix}_{self.val_metric}": metric_tensor,
}
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] ):
return calculate_rouge(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : dict ):
SCREAMING_SNAKE_CASE_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE_ = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=_lowerCAmelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
SCREAMING_SNAKE_CASE_ = (time.time() - ta) / batch['input_ids'].shape[0]
SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(batch['labels'] )
SCREAMING_SNAKE_CASE_ = self._step(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = self.calc_generative_metrics(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = np.mean(lmap(_lowerCAmelCase , _lowerCAmelCase ) )
base_metrics.update(gen_time=_lowerCAmelCase , gen_len=_lowerCAmelCase , preds=_lowerCAmelCase , target=_lowerCAmelCase , **_lowerCAmelCase )
return base_metrics
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
return self._generative_step(_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : str ):
return self.validation_epoch_end(_lowerCAmelCase , prefix='test' )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.n_obs[type_path]
SCREAMING_SNAKE_CASE_ = self.target_lens[type_path]
SCREAMING_SNAKE_CASE_ = self.dataset_class(
self.tokenizer , type_path=_lowerCAmelCase , n_obs=_lowerCAmelCase , max_target_length=_lowerCAmelCase , **self.dataset_kwargs , )
return dataset
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : bool = False ):
SCREAMING_SNAKE_CASE_ = self.get_dataset(_lowerCAmelCase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ = dataset.make_sortish_sampler(_lowerCAmelCase , distributed=self.hparams.gpus > 1 )
return DataLoader(
_lowerCAmelCase , batch_size=_lowerCAmelCase , collate_fn=dataset.collate_fn , shuffle=_lowerCAmelCase , num_workers=self.num_workers , sampler=_lowerCAmelCase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_lowerCAmelCase , batch_sampler=_lowerCAmelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_lowerCAmelCase , batch_size=_lowerCAmelCase , collate_fn=dataset.collate_fn , shuffle=_lowerCAmelCase , num_workers=self.num_workers , sampler=_lowerCAmelCase , )
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=_lowerCAmelCase )
return dataloader
def lowerCAmelCase_ ( self : Any ):
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def lowerCAmelCase_ ( self : Optional[int] ):
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowerCAmelCase_ ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
BaseTransformer.add_model_specific_args(_lowerCAmelCase , _lowerCAmelCase )
add_generic_args(_lowerCAmelCase , _lowerCAmelCase )
parser.add_argument(
'--max_source_length' , default=1_024 , type=_lowerCAmelCase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--max_target_length' , default=56 , type=_lowerCAmelCase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--val_max_target_length' , default=142 , type=_lowerCAmelCase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--test_max_target_length' , default=142 , type=_lowerCAmelCase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=_lowerCAmelCase )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=_lowerCAmelCase )
parser.add_argument('--max_tokens_per_batch' , type=_lowerCAmelCase , default=_lowerCAmelCase )
parser.add_argument('--logger_name' , type=_lowerCAmelCase , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=_lowerCAmelCase , default=-1 , required=_lowerCAmelCase , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=_lowerCAmelCase , default=500 , required=_lowerCAmelCase , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=_lowerCAmelCase , default=-1 , required=_lowerCAmelCase , help='# examples. -1 means use all.' )
parser.add_argument(
'--task' , type=_lowerCAmelCase , default='summarization' , required=_lowerCAmelCase , help='# examples. -1 means use all.' )
parser.add_argument('--label_smoothing' , type=_lowerCAmelCase , default=0.0 , required=_lowerCAmelCase )
parser.add_argument('--src_lang' , type=_lowerCAmelCase , default='' , required=_lowerCAmelCase )
parser.add_argument('--tgt_lang' , type=_lowerCAmelCase , default='' , required=_lowerCAmelCase )
parser.add_argument('--eval_beams' , type=_lowerCAmelCase , default=_lowerCAmelCase , required=_lowerCAmelCase )
parser.add_argument(
'--val_metric' , type=_lowerCAmelCase , default=_lowerCAmelCase , required=_lowerCAmelCase , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=_lowerCAmelCase , default=1 , required=_lowerCAmelCase , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=_lowerCAmelCase , default=-1 , required=_lowerCAmelCase , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will effect it.'
) , )
return parser
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "translation"
lowercase_ = ["loss"]
lowercase_ = ["bleu"]
lowercase_ = "bleu"
def __init__( self : str , _lowerCAmelCase : List[Any] , **_lowerCAmelCase : Optional[int] ):
super().__init__(_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = hparams.src_lang
SCREAMING_SNAKE_CASE_ = hparams.tgt_lang
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Any ):
return calculate_bleu(_lowerCAmelCase , _lowerCAmelCase )
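
# Hedged example invocation of this script (not part of the original file; paths,
# dataset and hyperparameter values are illustrative, and generic flags such as
# --model_name_or_path are assumed to come from lightning_base's add_generic_args):
# python finetune.py \
#   --data_dir ./cnn_dm --output_dir ./outputs \
#   --model_name_or_path t5-small --gpus 1 --do_predict \
#   --n_val 500 --val_metric rouge2 --max_target_length 56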
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any]=None ) -> SummarizationModule:
Path(args.output_dir ).mkdir(exist_ok=__UpperCAmelCase )
check_output_dir(__UpperCAmelCase , expected_items=3 )
if model is None:
if "summarization" in args.task:
SCREAMING_SNAKE_CASE_ = SummarizationModule(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ = TranslationModule(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
SCREAMING_SNAKE_CASE_ = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE_ = os.environ.get('WANDB_PROJECT' , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = WandbLogger(name=model.output_dir.name , project=__UpperCAmelCase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE_ = WandbLogger(name=model.output_dir.name , project=f"hf_{dataset}" )
if args.early_stopping_patience >= 0:
SCREAMING_SNAKE_CASE_ = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = args.val_metric == 'loss'
SCREAMING_SNAKE_CASE_ = generic_train(
__UpperCAmelCase , __UpperCAmelCase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , __UpperCAmelCase ) , early_stopping_callback=__UpperCAmelCase , logger=__UpperCAmelCase , )
pickle_save(model.hparams , model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
SCREAMING_SNAKE_CASE_ = ''
SCREAMING_SNAKE_CASE_ = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=__UpperCAmelCase ) )
if checkpoints:
SCREAMING_SNAKE_CASE_ = checkpoints[-1]
SCREAMING_SNAKE_CASE_ = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
lowerCamelCase__ : Tuple = argparse.ArgumentParser()
lowerCamelCase__ : Optional[Any] = pl.Trainer.add_argparse_args(parser)
lowerCamelCase__ : List[str] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
lowerCamelCase__ : Optional[int] = parser.parse_args()
    main(args)
| 31 |
def UpperCAmelCase_ ( ) -> list[list[int]]:
return [list(range(10_00 - i , -10_00 - i , -1 ) ) for i in range(10_00 )]
lowerCamelCase__ : List[Any] = generate_large_matrix()
lowerCamelCase__ : List[Any] = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def UpperCAmelCase_ ( __UpperCAmelCase : list[list[int]] ) -> None:
assert all(row == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for row in grid )
assert all(list(__UpperCAmelCase ) == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for col in zip(*__UpperCAmelCase ) )
def UpperCAmelCase_ ( __UpperCAmelCase : list[int] ) -> int:
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = len(__UpperCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
SCREAMING_SNAKE_CASE_ = (left + right) // 2
SCREAMING_SNAKE_CASE_ = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
SCREAMING_SNAKE_CASE_ = mid + 1
else:
SCREAMING_SNAKE_CASE_ = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : list[list[int]] ) -> int:
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = len(grid[0] )
for i in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE_ = find_negative_index(grid[i][:bound] )
total += bound
return (len(__UpperCAmelCase ) * len(grid[0] )) - total
def UpperCAmelCase_ ( __UpperCAmelCase : list[list[int]] ) -> int:
return len([number for row in grid for number in row if number < 0] )
def UpperCAmelCase_ ( __UpperCAmelCase : list[list[int]] ) -> int:
SCREAMING_SNAKE_CASE_ = 0
for row in grid:
for i, number in enumerate(__UpperCAmelCase ):
if number < 0:
total += len(__UpperCAmelCase ) - i
break
return total
def UpperCAmelCase_ ( ) -> None:
from timeit import timeit
print('Running benchmarks' )
SCREAMING_SNAKE_CASE_ = (
'from __main__ import count_negatives_binary_search, '
'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
SCREAMING_SNAKE_CASE_ = timeit(f"{func}(grid=grid)" , setup=__UpperCAmelCase , number=5_00 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 31 | 1 |
"""simple docstring"""
import functools
def min_distance_up_bottom(word1: str, word2: str ) -> int:
    """simple docstring"""
    len_word1 = len(word1 )
    len_word2 = len(word2 )
    @functools.cache
    def min_distance(index1: int, index2: int ) -> int:
        # if the first word's index overflows - delete all remaining from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word's index overflows - delete all remaining from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2] )  # current letters not identical
        # minimum over deleting, inserting, or substituting a character
        return min(
            1 + min_distance(index1 + 1, index2 ), 1 + min_distance(index1, index2 + 1 ), diff + min_distance(index1 + 1, index2 + 1 ), )
    return min_distance(0, 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester( unittest.TestCase ):
def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=4_00 , __a=True , __a=None , __a=True , __a=None , __a=True , ) -> int:
'''simple docstring'''
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 20}
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = num_channels
_UpperCamelCase = image_size
_UpperCamelCase = min_resolution
_UpperCamelCase = max_resolution
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_flip_channel_order
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = MobileViTImageProcessingTester(self)
@property
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__a , '''do_resize'''))
self.assertTrue(hasattr(__a , '''size'''))
self.assertTrue(hasattr(__a , '''do_center_crop'''))
self.assertTrue(hasattr(__a , '''center_crop'''))
self.assertTrue(hasattr(__a , '''do_flip_channel_order'''))
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''shortest_edge''': 20})
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18})
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {'''shortest_edge''': 42})
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84})
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
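        # left as a no-op here; the behaviour is presumably exercised by the shared test mixin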
pass
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a)
for image in image_inputs:
self.assertIsInstance(__a , Image.Image)
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(__a , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a)
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray)
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(__a , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a)
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor)
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(__a , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 78 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
A_ : int = False
class VQDiffusionPipelineFastTests ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self ):
return 1_2
@property
def __UpperCamelCase ( self ):
return 1_2
@property
def __UpperCamelCase ( self ):
return 3_2
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
snake_case__ : List[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
snake_case__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(__SCREAMING_SNAKE_CASE )
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
snake_case__ : Any = 1_2
snake_case__ : Any = 1_2
snake_case__ : Any = {
"""attention_bias""": True,
"""cross_attention_dim""": 3_2,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 3_2,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
        snake_case__ : int = Transformer2DModel(**__SCREAMING_SNAKE_CASE )
return model
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = """cpu"""
snake_case__ : int = self.dummy_vqvae
snake_case__ : Union[str, Any] = self.dummy_text_encoder
snake_case__ : str = self.dummy_tokenizer
snake_case__ : List[Any] = self.dummy_transformer
snake_case__ : Union[str, Any] = VQDiffusionScheduler(self.num_embed )
snake_case__ : Tuple = LearnedClassifierFreeSamplingEmbeddings(learnable=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = VQDiffusionPipeline(
vqvae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , transformer=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , learned_classifier_free_sampling_embeddings=__SCREAMING_SNAKE_CASE , )
snake_case__ : List[str] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = """teddy bear playing in the pool"""
snake_case__ : Union[str, Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
snake_case__ : Union[str, Any] = pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""np""" )
snake_case__ : List[Any] = output.images
snake_case__ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
snake_case__ : Optional[int] = pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , return_dict=__SCREAMING_SNAKE_CASE , num_inference_steps=2 )[0]
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
snake_case__ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case__ : List[Any] = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self ):
snake_case__ : int = """cpu"""
snake_case__ : Optional[Any] = self.dummy_vqvae
snake_case__ : Optional[int] = self.dummy_text_encoder
snake_case__ : List[Any] = self.dummy_tokenizer
snake_case__ : List[str] = self.dummy_transformer
snake_case__ : Tuple = VQDiffusionScheduler(self.num_embed )
snake_case__ : Dict = LearnedClassifierFreeSamplingEmbeddings(
learnable=__SCREAMING_SNAKE_CASE , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case__ : Union[str, Any] = VQDiffusionPipeline(
vqvae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , transformer=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , learned_classifier_free_sampling_embeddings=__SCREAMING_SNAKE_CASE , )
snake_case__ : Any = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """teddy bear playing in the pool"""
snake_case__ : Optional[Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
snake_case__ : Optional[Any] = pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""np""" )
snake_case__ : int = output.images
snake_case__ : int = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
snake_case__ : Any = pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , return_dict=__SCREAMING_SNAKE_CASE , num_inference_steps=2 )[0]
snake_case__ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case__ : Optional[Any] = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case__ : Dict = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case__ : int = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case__ : List[str] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
snake_case__ : List[str] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , )
snake_case__ : Any = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 38 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
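# scrape the first Amazon.in results page for a product query and collect title,
# link, price, rating, MRP and a computed discount into a pandas DataFrame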
def get_amazon_product_data( product: str = "laptop" ) -> DataFrame:
    '''simple docstring'''
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        """User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        """Accept-Language""": """en-US, en;q=0.5""",
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            """Product Title""",
            """Product Link""",
            """Current Price of the product""",
            """Product Rating""",
            """MRP of the product""",
            """Discount""",
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            """div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
        # zip_longest pairs each result card with its (possibly missing) rating row
        try:
            product_title = item.h2.text
            product_link = """https://www.amazon.in/""" + item.h2.a["""href"""]
            product_price = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
            try:
                product_rating = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
            except AttributeError:
                product_rating = """Not available"""
            try:
                product_mrp = (
                    """₹"""
                    + item.find(
                        """span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
                            - float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
                        )
                        / float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
                    )
                    * 1_00 )
            except ValueError:
                discount = float("""nan""" )
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[data_frame["""Current Price of the product"""] == "", """Current Price of the product"""] = """ """
        data_frame.loc[data_frame["""Product Rating"""] == "", """Product Rating"""] = """ """
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 38 | 1 |
import torch
from diffusers import DiffusionPipeline
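# minimal test pipeline: one UNet denoising step on random noise; the last line
# cancels the scheduler output so the call deterministically returns all ones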
class __snake_case ( DiffusionPipeline ):
    def __init__( self , unet , scheduler) -> Any:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler)
    def __call__( self) -> Any:
        '''simple docstring'''
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(image , timestep).sample
        scheduler_output = self.scheduler.step(model_output , timestep , image).prev_sample
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
| 705 | """simple docstring"""
import math
def __a ( ) ->None:
a__: int = input('Enter message: ' )
a__: List[str] = int(input(F'Enter key [2-{len(_SCREAMING_SNAKE_CASE ) - 1}]: ' ) )
a__: int = input('Encryption/Decryption [e/d]: ' )
if mode.lower().startswith('e' ):
a__: Optional[int] = encrypt_message(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif mode.lower().startswith('d' ):
a__: List[str] = decrypt_message(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F'Output:\n{text + "|"}' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: Optional[int] = [''] * key
for col in range(_SCREAMING_SNAKE_CASE ):
a__: Dict = col
while pointer < len(_SCREAMING_SNAKE_CASE ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(_SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: Tuple = math.ceil(len(_SCREAMING_SNAKE_CASE ) / key )
a__: Dict = key
a__: str = (num_cols * num_rows) - len(_SCREAMING_SNAKE_CASE )
a__: int = [''] * num_cols
a__: Any = 0
a__: Union[str, Any] = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
a__: int = 0
row += 1
return "".join(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 217 | 0 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
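# helper below fabricates nested lists of random floats with a given shape (used as
# fake raw audio); it falls back to a module-level RNG when none is supplied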
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester( unittest.TestCase ):
def __init__( self : str , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Tuple=400 , UpperCamelCase_ : Tuple=2_000 , UpperCamelCase_ : str=10 , UpperCamelCase_ : int=160 , UpperCamelCase_ : Tuple=8 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : int=4_000 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : List[str]=True , ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : str = parent
lowerCamelCase_ : Union[str, Any] = batch_size
lowerCamelCase_ : Union[str, Any] = min_seq_length
lowerCamelCase_ : Tuple = max_seq_length
lowerCamelCase_ : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCamelCase_ : List[Any] = padding_value
lowerCamelCase_ : Union[str, Any] = sampling_rate
lowerCamelCase_ : List[Any] = return_attention_mask
lowerCamelCase_ : Dict = do_normalize
lowerCamelCase_ : List[str] = feature_size
lowerCamelCase_ : List[str] = chunk_length
lowerCamelCase_ : Dict = hop_length
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCamelCase ( self : int , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
def _flatten(UpperCamelCase_ : int ):
return list(itertools.chain(*UpperCamelCase_ ) )
if equal_length:
lowerCamelCase_ : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCamelCase_ : Optional[int] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase_ : Dict = [np.asarray(UpperCamelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
lowerCamelCase_ : List[str] = WhisperFeatureExtractionTester(self )
def __UpperCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ : int = feat_extract_first.save_pretrained(UpperCamelCase_ )[0]
check_json_file_has_correct_format(UpperCamelCase_ )
lowerCamelCase_ : Dict = self.feature_extraction_class.from_pretrained(UpperCamelCase_ )
lowerCamelCase_ : List[str] = feat_extract_first.to_dict()
lowerCamelCase_ : Dict = feat_extract_second.to_dict()
lowerCamelCase_ : Tuple = feat_extract_first.mel_filters
lowerCamelCase_ : str = feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ : Any = os.path.join(UpperCamelCase_ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase_ )
lowerCamelCase_ : List[Any] = self.feature_extraction_class.from_json_file(UpperCamelCase_ )
lowerCamelCase_ : List[Any] = feat_extract_first.to_dict()
lowerCamelCase_ : Optional[int] = feat_extract_second.to_dict()
lowerCamelCase_ : Optional[int] = feat_extract_first.mel_filters
lowerCamelCase_ : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase_ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCamelCase_ : Optional[int] = [np.asarray(UpperCamelCase_ ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase_ : List[str] = feature_extractor(UpperCamelCase_ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
lowerCamelCase_ : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
lowerCamelCase_ : int = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3 ) )
# Test batched
lowerCamelCase_ : Any = feature_extractor(UpperCamelCase_ , return_tensors='''np''' ).input_features
lowerCamelCase_ : Optional[Any] = feature_extractor(UpperCamelCase_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase_ : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase_ : Any = np.asarray(UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = feature_extractor(UpperCamelCase_ , return_tensors='''np''' ).input_features
lowerCamelCase_ : Union[str, Any] = feature_extractor(UpperCamelCase_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3 ) )
# Test truncation required
lowerCamelCase_ : Tuple = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
lowerCamelCase_ : Optional[int] = [np.asarray(UpperCamelCase_ ) for speech_input in speech_inputs]
lowerCamelCase_ : Union[str, Any] = [x[: feature_extractor.n_samples] for x in speech_inputs]
lowerCamelCase_ : Dict = [np.asarray(UpperCamelCase_ ) for speech_input in speech_inputs_truncated]
lowerCamelCase_ : Tuple = feature_extractor(UpperCamelCase_ , return_tensors='''np''' ).input_features
lowerCamelCase_ : Tuple = feature_extractor(UpperCamelCase_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3 ) )
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
import torch
lowerCamelCase_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowerCamelCase_ : Any = np.random.rand(100 , 32 ).astype(np.float32 )
        lowerCamelCase_ : Optional[Any] = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            lowerCamelCase_ : Optional[Any] = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            lowerCamelCase_ : Any = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def __UpperCamelCase ( self : Union[str, Any] , UpperCamelCase_ : Dict ) -> Dict:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
lowerCamelCase_ : Any = ds.sort('''id''' ).select(range(UpperCamelCase_ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ : Optional[int] = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
lowerCamelCase_ : Optional[Any] = self._load_datasamples(1 )
lowerCamelCase_ : Optional[int] = WhisperFeatureExtractor()
lowerCamelCase_ : Any = feature_extractor(UpperCamelCase_ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , UpperCamelCase_ , atol=1e-4 ) )
def __UpperCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ : Optional[Any] = self._load_datasamples(1 )[0]
lowerCamelCase_ : int = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
lowerCamelCase_ : int = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=UpperCamelCase_ )[0]
self.assertTrue(np.all(np.mean(UpperCamelCase_ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCamelCase_ ) - 1 ) < 1e-3 ) )
| 501 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
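# builds whole-word-masking reference files for Chinese: LTP segments each line into
# words, and BERT sub-tokens that continue a Chinese word are recorded by position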
def _is_chinese_char(cp: int ) -> bool:
    '''simple docstring'''
    # returns True when the code point lies in one of the CJK ideograph blocks
    if (
        (cp >= 0x4_E_0_0 and cp <= 0x9_F_F_F)
        or (cp >= 0x3_4_0_0 and cp <= 0x4_D_B_F) #
        or (cp >= 0x2_0_0_0_0 and cp <= 0x2_A_6_D_F) #
        or (cp >= 0x2_A_7_0_0 and cp <= 0x2_B_7_3_F) #
        or (cp >= 0x2_B_7_4_0 and cp <= 0x2_B_8_1_F) #
        or (cp >= 0x2_B_8_2_0 and cp <= 0x2_C_E_A_F) #
        or (cp >= 0xF_9_0_0 and cp <= 0xF_A_F_F)
        or (cp >= 0x2_F_8_0_0 and cp <= 0x2_F_A_1_F) #
    ): #
        return True
    return False
def is_chinese(word: str ):
    '''simple docstring'''
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word(tokens: List[str] ):
    '''simple docstring'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
return word_list
def add_sub_symbol(bert_tokens: List[str] , chinese_word_set: set ):
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start, end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            # try the longest possible Chinese word first
            max_match = min(end - start , max_word_len )
            for i in range(max_match , 1 , -1 ):
                whole_word = ''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    # mark every continuation token of the matched word with "##"
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str] , ltp_tokenizer: LTP , bert_tokenizer: BertTokenizer ):
    '''simple docstring'''
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res['''input_ids'''] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main(args ):
    '''simple docstring'''
    with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
        data = [json.dumps(ref ) + '''\n''' for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
| 422 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
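# the heavy model modules are only registered below when the matching backend
# (PyTorch or TensorFlow) is actually importable in the current environment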
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 326 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
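# helpers: step a scheduler a few times and record the learning rate at each step;
# the second variant also round-trips the scheduler state through disk halfway in,
# to check that a saved-and-reloaded schedule continues identically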
def unwrap_schedule(scheduler , num_steps=1_0 ):
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler , num_steps=1_0 ):
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , '''schedule.bin''' )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class OptimizationTest( unittest.TestCase ):
def lowerCAmelCase (self : Tuple , snake_case_ : List[str] , snake_case_ : Optional[int] , snake_case_ : int ):
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for a, b in zip(snake_case_ , snake_case_ ):
self.assertAlmostEqual(snake_case_ , snake_case_ , delta=snake_case_ )
def lowerCAmelCase (self : Dict ):
__a : List[str] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case_ )
__a : Optional[int] = torch.tensor([0.4, 0.2, -0.5] )
__a : str = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__a : Tuple = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(1_0_0 ):
__a : Optional[int] = criterion(snake_case_ , snake_case_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def lowerCAmelCase (self : Any ):
__a : int = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case_ )
__a : Optional[Any] = torch.tensor([0.4, 0.2, -0.5] )
__a : List[str] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__a : Tuple = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case_ , weight_decay=0.0 , relative_step=snake_case_ , scale_parameter=snake_case_ , warmup_init=snake_case_ , )
for _ in range(1_0_0_0 ):
__a : str = criterion(snake_case_ , snake_case_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class ScheduleInitTest( unittest.TestCase ):
_SCREAMING_SNAKE_CASE : str = nn.Linear(50 ,50 ) if is_torch_available() else None
_SCREAMING_SNAKE_CASE : Any = AdamW(m.parameters() ,lr=10.0 ) if is_torch_available() else None
_SCREAMING_SNAKE_CASE : Tuple = 10
def lowerCAmelCase (self : int , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : str=None ):
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for a, b in zip(snake_case_ , snake_case_ ):
self.assertAlmostEqual(snake_case_ , snake_case_ , delta=snake_case_ , msg=snake_case_ )
def lowerCAmelCase (self : int ):
__a : Tuple = {'''num_warmup_steps''': 2, '''num_training_steps''': 1_0}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
__a : Tuple = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
__a , __a : Union[str, Any] = data
__a : int = scheduler_func(self.optimizer , **snake_case_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
__a : Tuple = unwrap_schedule(snake_case_ , self.num_steps )
self.assertListAlmostEqual(
snake_case_ , snake_case_ , tol=1E-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
__a : Any = scheduler_func(self.optimizer , **snake_case_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(snake_case_ ) # wrap to test picklability of the schedule
__a : Tuple = unwrap_and_save_reload_schedule(snake_case_ , self.num_steps )
self.assertListEqual(snake_case_ , snake_case_ , msg=f"failed for {scheduler_func} in save and reload" )
class LambdaScheduleWrapper:
    # wraps each lr lambda so the resulting schedule can be pickled (see the test above)
    def __init__(self , fn ):
        self.fn = fn
    def __call__(self , *args , **kwargs ):
        return self.fn(*args , **kwargs )
    @classmethod
    def wrap_scheduler(cls , scheduler ):
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
| 326 | 1 |
"""simple docstring"""
import re
def indian_phone_validator(phone: str ) -> bool:
    # optional "+91"/"91" prefix and an optional leading 0, followed by a ten-digit
    # number that starts with 7, 8 or 9
    pat = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
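# e.g. indian_phone_validator('+91-9876543210') is True, while a nine-digit
# number such as indian_phone_validator('987654321') is False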
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
| 49 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a ( self : Optional[int] ):
torch.manual_seed(0 )
        __UpperCAmelCase = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCAmelCase = CLIPTextModel(_lowercase )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
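    # per-test inputs: a tiny random image shifted into the upper half of [0, 1],
    # prompts for the source and target directions of CycleDiffusion, and a seeded
    # generator for determinism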
def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ):
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
__UpperCAmelCase = image / 2 + 0.5
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Optional[int] ):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowercase , '''half''' ):
__UpperCAmelCase = module.half()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a ( self : Tuple ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def a ( self : List[str] ):
return super().test_inference_batch_single_identical()
@skip_mps
def a ( self : int ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self : int ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests ( unittest.TestCase ):
def a ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(
            _lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.float16 , revision='''fp16''' )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def a ( self : Optional[Any] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 49 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
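# shared assertion helpers: the parquet fixtures always contain 4 rows with columns
# col_1 / col_2 / col_3, so every reader test funnels through the checks below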
def _check_parquet_dataset(dataset , expected_features ):
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCamelCase = ParquetDatasetReader(__snake_case , cache_dir=__snake_case , keep_in_memory=__snake_case ).read()
_check_parquet_dataset(__snake_case , __snake_case )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_UpperCamelCase = features.copy() if features else default_expected_features
_UpperCamelCase = (
Features({feature: Value(__snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCamelCase = ParquetDatasetReader(__snake_case , features=__snake_case , cache_dir=__snake_case ).read()
_check_parquet_dataset(__snake_case , __snake_case )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_UpperCamelCase = ParquetDatasetReader(__snake_case , cache_dir=__snake_case , split=__snake_case ).read()
_check_parquet_dataset(__snake_case , __snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
if issubclass(__snake_case , __snake_case ):
_UpperCamelCase = parquet_path
elif issubclass(__snake_case , __snake_case ):
_UpperCamelCase = [parquet_path]
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_UpperCamelCase = ParquetDatasetReader(__snake_case , cache_dir=__snake_case ).read()
_check_parquet_dataset(__snake_case , __snake_case )
def _check_parquet_datasetdict(dataset_dict , expected_features , splits=("train",) ):
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCamelCase = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=__snake_case , keep_in_memory=__snake_case ).read()
_check_parquet_datasetdict(__snake_case , __snake_case )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_UpperCamelCase = features.copy() if features else default_expected_features
_UpperCamelCase = (
Features({feature: Value(__snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCamelCase = ParquetDatasetReader({'''train''': parquet_path} , features=__snake_case , cache_dir=__snake_case ).read()
_check_parquet_datasetdict(__snake_case , __snake_case )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
if split:
_UpperCamelCase = {split: parquet_path}
else:
_UpperCamelCase = '''train'''
_UpperCamelCase = {'''train''': parquet_path, '''test''': parquet_path}
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_UpperCamelCase = ParquetDatasetReader(__snake_case , cache_dir=__snake_case ).read()
_check_parquet_datasetdict(__snake_case , __snake_case , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = ParquetDatasetWriter(__snake_case , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
_UpperCamelCase = pq.ParquetFile(tmp_path / '''foo.parquet''' )
_UpperCamelCase = pf.read()
assert dataset.data.table == output_table
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = str(shared_datadir / '''test_image_rgb.jpg''' )
_UpperCamelCase = {'''image''': [image_path]}
_UpperCamelCase = Features({'''image''': Image()} )
_UpperCamelCase = Dataset.from_dict(__snake_case , features=__snake_case )
_UpperCamelCase = ParquetDatasetWriter(__snake_case , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
_UpperCamelCase = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
_UpperCamelCase = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=__snake_case ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def _snake_case ( __snake_case , __snake_case ):
assert get_writer_batch_size(__snake_case ) == expected
| 71 |
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the image processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 71 | 1 |
def is_contains_unique_chars(input_str: str) -> bool:
    """
    Check whether all characters in the string are unique.

    >>> is_contains_unique_chars("I_love.py")
    True
    >>> is_contains_unique_chars("I don't love Python")
    False
    """
    # Each bit of the integer represents one unicode code point,
    # e.g. the 65th bit represents "A".
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on the bit for the current character's unicode,
        # the character occurs twice.
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
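# Note on complexity: the bitmap grows with the largest code point seen
# (pow(2, ord(ch)) allocates a big int with ord(ch) bits), so this stays cheap
# only for small alphabets; for long unicode strings, tracking seen characters
# in a set() is the simpler and usually faster alternative.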
| 61 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""CLIPFeatureExtractor"""]
lowerCAmelCase = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 174 | 0 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
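# A minimal usage sketch of the registry exercised above: get_activation
# returns a torch.nn.Module, so it can be applied to tensors directly, e.g.
#
#     act = get_activation("gelu")
#     y = act(torch.tensor([-1.0, 0.0, 1.0]))
#
# (illustrative values; any float tensor works).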
| 713 |
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """
    Given that a laser beam strikes the interior of the ellipse 4x^2 + y^2 = 100
    at (point_x, point_y) with gradient incoming_gradient, return the next point
    of incidence together with the gradient of the reflected beam.
    """
    # normal_gradient = gradient of the normal line at the point of incidence
    normal_gradient = point_y / 4 / point_x
    # s_2 = sin(2 * alpha), c_2 = cos(2 * alpha), where alpha is the angle of the normal
    s_2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c_2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )

    # gradient of the reflected beam
    outgoing_gradient = (s_2 - c_2 * incoming_gradient) / (c_2 + s_2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Count the number of reflections before the beam exits the ellipse."""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
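# Geometry recap (Project Euler 144): the beam enters through the small gap at
# the top of the ellipse 4x^2 + y^2 = 100 and first strikes (1.4, -9.6);
# solution() counts reflections until the beam exits back through the gap,
# i.e. until -0.01 <= x <= 0.01 with y > 0.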
| 146 | 0 |
import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
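# Note on the four full-loop tests above: they are regression tests that pin
# the exact sum/mean of the final sample for a fixed seed, so any change to the
# scheduler's numerics surfaces as a tolerance failure (1e-2 on the sum, 1e-3
# on the mean).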
| 50 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """
    Sort a sequence of 0s, 1s and 2s in a single pass (Dutch national flag problem).

    >>> dutch_national_flag_sort([])
    []
    >>> dutch_national_flag_sort([0])
    [0]
    >>> dutch_national_flag_sort([2, 1, 0])
    [0, 1, 2]
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 503 | 0 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional

from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from the provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command on the argparse subparsers so it is available through the transformers CLI."""
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input and eventually return the corresponding tokens ids.
        - **text_input**: String to tokenize
        - **return_ids**: Boolean flag indicating if the tokens have to be converted to their integer mapping.
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """
        Detokenize the provided tokens ids to readable text.
        - **tokens_ids**: List of tokens ids
        - **skip_special_tokens**: Flag indicating to not try to decode special tokens
        - **cleanup_tokenization_spaces**: Flag indicating to remove all leading/trailing spaces and intermediate ones.
        """
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """Run the pipeline on the provided inputs."""
        # Check we don't have an empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
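# Assuming the optional [serving] extras are installed, the server defined
# above is started through the transformers CLI, for example:
#
#     transformers-cli serve --task text-classification --port 8888
#
# after which the /tokenize route accepts a JSON body such as
# {"text_input": "Hello world", "return_ids": true} (task name and payload are
# illustrative).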
| 170 |
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label).

    Produces Transformer inputs of shape (n_batch, 2, input_len), where each alternative is
    [start_token] + story[:cap_length] + [delimiter_token] + continuation[:cap_length] + [clf_token].
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)

    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading function also adds new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
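# A typical invocation of this fine-tuning script on the ROCStories cloze data
# (paths below are placeholders):
#
#     python run_openai_gpt.py \
#         --model_name openai-gpt \
#         --do_train --do_eval \
#         --train_dataset /path/to/cloze_test_val__spring2016.csv \
#         --eval_dataset /path/to/cloze_test_test__spring2016.csv \
#         --output_dir /tmp/rocstories \
#         --train_batch_size 16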
| 170 | 1 |