| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
import operator as op
a = '''scaler.pt'''
a = '''pytorch_model'''
a = '''random_states'''
a = '''optimizer'''
a = '''scheduler'''
a = '''pytorch_model.bin'''
a = '''pytorch_model.bin.index.json'''
a = '''model.safetensors'''
a = '''model.safetensors.index.json'''
a = '''1.10.2'''
a = '''py38'''
a = '''4.17.0'''
a = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
a = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
a = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
a = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
a = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
a = '''2.0.1'''
a = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
a = ['''default''', '''reduce-overhead''', '''max-autotune''']
a = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
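# Usage sketch (an assumed example, not part of the original file): the mapping turns a
# comparison spelled as a string into the matching operator function, e.g.
#     STR_OPERATION_TO_FUNC[">="]((2, 0, 1), (1, 10, 2))  # -> True (tuples compare lexicographically)
# which is convenient for version gates driven by configuration strings.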
# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
XPU_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]  # distributed types supported on XPU devices
| 315
|
"""simple docstring"""
from collections.abc import Callable
def _snake_case ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
_A = a
_A = b
if function(_snake_case ) == 0: # one of the a or b is a root for the function
return a
elif function(_snake_case ) == 0:
return b
elif (
function(_snake_case ) * function(_snake_case ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
_A = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(_snake_case ) == 0:
return mid
elif function(_snake_case ) * function(_snake_case ) < 0:
_A = mid
else:
_A = mid
_A = start + (end - start) / 2.0
return mid
def _snake_case ( _snake_case : float ) -> float:
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
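# For reference (computed, not from the original file): the real root of x**3 - 2*x - 5
# is approximately 2.0945515, so the printed value should be close to that.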
| 315
| 1
|
"""Tests for the Pegasus tokenizers (slow and fast)."""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = PegasusTokenizer
rust_tokenizer_class = PegasusTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def _large_tokenizer(self) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained("google/pegasus-large")
def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
return ("This is a test", "This is a test")
def test_convert_token_and_id(self):
token = "</s>"
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<pad>")
self.assertEqual(vocab_keys[1], "</s>")
self.assertEqual(vocab_keys[-1], "v")
self.assertEqual(len(vocab_keys), 1103)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 1103)
def test_mask_tokens_rust_pegasus(self):
rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
raw_input_str = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
self.assertListEqual(py_ids, rust_ids)
def test_large_mask_tokens(self):
tokenizer = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
self.assertListEqual(desired_result, ids)
def test_large_tokenizer_settings(self):
tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
raw_input_str = "To ensure a smooth flow of bank resolutions."
desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
self.assertListEqual(desired_result, ids)
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def test_large_seq2seq_truncation(self):
src_texts = ["This is going to be way too long." * 150, "short example"]
tgt_texts = ["not super long but more than 5 tokens", "tiny"]
batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
targets = self._large_tokenizer(
text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt")
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(batch) == 2  # input_ids, attention_mask.
@slow
def test_tokenizer_integration(self):
# fmt: off
_SCREAMING_SNAKE_CASE ={'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE, model_name="google/bigbird-pegasus-large-arxiv", revision="ba85d0851d708441f91440d509690f1ab6353415", )  # `_SCREAMING_SNAKE_CASE` is the expected-encoding dict built above
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = PegasusTokenizer
rust_tokenizer_class = PegasusTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def _large_tokenizer(self) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")
def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
return ("This is a test", "This is a test")
def test_mask_tokens_rust_pegasus(self):
rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
raw_input_str = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
self.assertListEqual(py_ids, rust_ids)
@require_torch
def test_large_seq2seq_truncation(self):
src_texts = ["This is going to be way too long." * 1000, "short example"]
tgt_texts = ["not super long but more than 5 tokens", "tiny"]
batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
targets = self._large_tokenizer(
text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt")
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(batch) == 2  # input_ids, attention_mask.
def test_equivalence_to_orig_tokenizer(self):
raw_input_str = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
token_ids = self._large_tokenizer(raw_input_str).input_ids
self.assertListEqual(
token_ids, [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1], )
| 114
|
"""Tests for the Whisper feature extractor."""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import WhisperFeatureExtractor

if is_torch_available():
    import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random nested float list with the given (batch, length) shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
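# e.g. floats_list((2, 3)) returns a 2x3 nested list of floats in [0, scale) (exact values vary with the RNG).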
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10,
        hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4000,
        return_attention_mask=False, do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 114
| 1
|
"""Masked BERT configuration (BERT with learnable pruning masks)."""
import logging

from transformers.configuration_utils import PretrainedConfig

logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
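# Usage sketch (assumed, for illustration only): MaskedBertConfig(pruning_method="topK",
# mask_init="constant", mask_scale=0.0) mirrors the defaults above; checkpoint-specific
# values would normally come from `from_pretrained`.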
| 80
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
    import tensorflow as tf

if is_tensorflow_text_available():
    from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 38
| 0
|
from datetime import datetime as dt
import os

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
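# Note: this script assumes GITHUB_TOKEN is set in the environment and is meant to run
# unattended (e.g. from a scheduled CI job); run it manually only against a test repository.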
| 365
|
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-Roberta tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
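# Usage sketch (assumed names, for illustration only):
#     processor = AltCLIPProcessor(image_processor=image_processor, tokenizer=tokenizer)
#     inputs = processor(text=["a photo of a cat"], images=[image], return_tensors="pt")
# yields input_ids/attention_mask from the tokenizer plus pixel_values from the image processor.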
| 65
| 0
|
def solution(limit: int = 28123) -> int:
    """Project Euler 23: sum of all positive integers that cannot be
    written as the sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)  # sum of proper divisors for every n <= limit

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
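# Expected output for the default limit of 28123 is 4179871 (the known Project Euler 23 answer).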
| 204
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
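# With this standard transformers lazy-module pattern, `from transformers.models.nezha import NezhaModel`
# defers importing the heavy torch-backed module until the attribute is first accessed.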
| 204
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 39
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None, **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
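# Usage sketch (assumed, for illustration only): processor(images=[image], text="a question",
# return_tensors="pt") returns flattened patches plus, when text is given, the
# decoder_input_ids/decoder_attention_mask produced by the key renaming above.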
| 39
| 1
|
"""Tests for the BioGPT model."""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        BioGptForCausalLM,
        BioGptForSequenceClassification,
        BioGptForTokenClassification,
        BioGptModel,
        BioGptTokenizer,
    )
    from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)

        output_ids = model.generate(
            **tokenized, min_length=100, max_length=1024, num_beams=5, early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
| 70
|
"""Convert original ProphetNet/XLM-ProphetNet checkpoints to the transformers model structure."""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ["key_proj", "value_proj", "query_proj"]
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")
    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path to the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
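# Aside: a stand-alone sketch (invented sizes, not from the script) of the
# fused-projection split performed above, where one (3 * embed_dim, embed_dim)
# in_proj weight becomes separate q/k/v layers. Call the helper to verify.
def _demo_split_fused_in_proj():
    import torch

    embed_dim = 8
    fused = nn.Linear(embed_dim, 3 * embed_dim)  # stand-in for old_model's fused in_proj
    q_proj = nn.Linear(embed_dim, embed_dim)
    k_proj = nn.Linear(embed_dim, embed_dim)
    v_proj = nn.Linear(embed_dim, embed_dim)
    q_proj.weight = nn.Parameter(fused.weight[:embed_dim, :])
    q_proj.bias = nn.Parameter(fused.bias[:embed_dim])
    k_proj.weight = nn.Parameter(fused.weight[embed_dim : 2 * embed_dim, :])
    k_proj.bias = nn.Parameter(fused.bias[embed_dim : 2 * embed_dim])
    v_proj.weight = nn.Parameter(fused.weight[2 * embed_dim :, :])
    v_proj.bias = nn.Parameter(fused.bias[2 * embed_dim :])
    x = torch.randn(2, embed_dim)
    q, k, v = fused(x).split(embed_dim, dim=-1)
    assert torch.allclose(q, q_proj(x), atol=1e-6)
    assert torch.allclose(k, k_proj(x), atol=1e-6)
    assert torch.allclose(v, v_proj(x), atol=1e-6)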
| 208
| 0
|
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
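# Aside: why the conversion transposes `visual_projection.weight` and
# `text_projection.weight` (`val = val.T` in convert_state_dict). The original
# checkpoint stores the projection for a right-multiplication (x @ W), while
# `nn.Linear` stores `weight` as (out_features, in_features) and computes x @ W.T.
# A toy check with invented shapes:
def _demo_projection_transpose():
    from torch import nn

    in_dim, out_dim = 4, 3
    w_right_mul = torch.randn(in_dim, out_dim)  # checkpoint-style matrix, used as x @ W
    linear = nn.Linear(in_dim, out_dim, bias=False)
    with torch.no_grad():
        linear.weight.copy_(w_right_mul.T)  # the same transpose as in the converter
    x = torch.randn(2, in_dim)
    assert torch.allclose(x @ w_right_mul, linear(x), atol=1e-6)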
| 366
|
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only supports v1 configs
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
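# Aside: a post-conversion sanity check one might add (not in the original script):
# run a random image through a tiny AutoencoderKL and confirm the shapes round-trip.
# The miniature config below is invented for the sketch.
def _demo_vae_roundtrip():
    vae = AutoencoderKL(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=4,
    )
    x = torch.randn(1, 3, 64, 64)
    with torch.no_grad():
        latents = vae.encode(x).latent_dist.sample()
        recon = vae.decode(latents).sample
    assert recon.shape == x.shape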
| 241
| 0
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    # True when `number` is a perfect square.
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    # Add three fractions and reduce the result with their gcd.
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 95
|
def is_arithmetic_series(series: list) -> bool:
    # Return True if the input list is an arithmetic progression.
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    # Return the arithmetic mean of the input list.
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
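# Example usage of the two helpers above:
assert is_arithmetic_series([2, 4, 6, 8]) is True  # common difference 2
assert is_arithmetic_series([2, 4, 7]) is False
assert arithmetic_mean([2, 4, 6, 8]) == 5.0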
| 95
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
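# Aside: the heart of `_LazyModule` is deferring the heavy import until an attribute
# is first accessed. A standalone sketch of the same idea using PEP 562 module-level
# __getattr__ (illustration only, not the transformers implementation):
#
#     import importlib
#
#     _LAZY_ATTRS = {"ViTMSNConfig": ".configuration_vit_msn"}
#
#     def __getattr__(name):
#         if name in _LAZY_ATTRS:
#             module = importlib.import_module(_LAZY_ATTRS[name], __package__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")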
| 3
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
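# Hypothetical usage sketch (downloads the BLIP checkpoint on first call; requires
# the vision extras; the dummy black image is invented for the example):
def _demo_image_captioner():
    import numpy as np
    from PIL import Image as PILImage

    tool = ImageCaptioningTool()
    dummy = PILImage.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
    return tool(dummy)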
| 3
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
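# What the dummy `past_key_values` built above looks like concretely: one
# (key, value) pair of zeros per layer, each shaped
# (batch, num_heads, past_sequence_length, head_dim). Sizes below are invented.
def _demo_past_key_values_shapes():
    import torch

    batch, num_heads, past_len, head_dim, num_layers = 2, 16, 9, 256, 28
    past_shape = (batch, num_heads, past_len, head_dim)
    past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_layers)]
    assert len(past_key_values) == num_layers
    assert past_key_values[0][0].shape == torch.Size(past_shape)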
| 25
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 58
| 0
|
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 102
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
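# Aside: the Ray actor round trip the retriever relies on, in isolation. A minimal
# sketch (`Counter` is an invented example actor, not part of the retriever):
def _demo_ray_actor():
    @ray.remote
    class Counter:
        def __init__(self):
            self.value = 0

        def increment(self):
            self.value += 1
            return self.value

    ray.init(ignore_reinit_error=True)
    workers = [Counter.remote() for _ in range(2)]
    return ray.get([w.increment.remote() for w in workers])  # -> [1, 1]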
| 102
| 1
|
"""simple docstring"""
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Snapshot the row: aliasing the two lists here would corrupt the
        # diagonal lookups in the next iteration.
        next_row = current_row.copy()
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
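    # Cross-check: all four implementations above agree on a small matrix
    # containing two overlapping 2x2 squares of ones.
    example = [
        [1, 1, 0],
        [1, 1, 1],
        [0, 1, 1],
    ]
    assert largest_square_area_in_matrix_top_down_approach(3, 3, example) == 2
    assert largest_square_area_in_matrix_top_down_approach_with_dp(3, 3, example) == 2
    assert largest_square_area_in_matrix_bottom_up(3, 3, example) == 2
    assert largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, example) == 2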
| 25
|
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    # Count the set bits (1s) in the binary representation of `number`.
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(number).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
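# An equivalent popcount without string conversion: Kernighan's trick clears the
# lowest set bit per iteration, so the loop runs once per set bit.
def get_set_bits_count_kernighan(number: int) -> int:
    count = 0
    while number:
        number &= number - 1
        count += 1
    return count

assert get_set_bits_count_kernighan(0b101101) == 4
assert get_set_bits_count_kernighan(0) == 0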
| 25
| 1
|
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    # Simulate the BB84 quantum key distribution protocol and return the key.
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
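# The qiskit-free core of BB84's sifting step, as a standalone sketch: only the
# positions where Alice and Bob happened to choose the same basis (about half of
# them) survive into the raw key.
def _demo_basis_sifting(n: int = 48, seed: int = 0) -> str:
    sift_rng = np.random.default_rng(seed=seed)
    alice_b = sift_rng.integers(2, size=n)
    bob_b = sift_rng.integers(2, size=n)
    alice_bits = sift_rng.integers(2, size=n)
    # roughly n / 2 bits are kept
    return "".join(str(bit) for a, b, bit in zip(alice_b, bob_b, alice_bits) if a == b)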
| 49
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    # Copy/paste/tweak a ParlAI checkpoint's weights into the HF Blenderbot layout.
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
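# The key-renaming approach above, in isolation on a toy state dict (illustrative
# only; the toy key is invented):
def _demo_rename_key():
    toy_sd = {"encoder.layers.0.attention.q_lin.weight": torch.zeros(2, 2)}
    renamed = {rename_state_dict_key(k): v for k, v in toy_sd.items()}
    assert "encoder.layers.0.self_attn.q_proj.weight" in renamed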
| 49
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
@property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T  # dense/projection kernels are stored transposed in TF
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
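# Illustrative sketch (not part of the original script): traces how `rename_state_dict_key`
# maps one raw TF Pegasus variable name onto a PyTorch state_dict key via PATTERNS above.
# The sample variable name is a hypothetical example.
def _demo_rename_state_dict_key():
    sample_tf_name = "model/encoder/LayerNorm/gamma"
    # "/" -> ".", then ".LayerNorm.gamma" -> "_layer_norm.weight",
    # then "encoder_layer_norm." -> "encoder.layer_norm."
    assert rename_state_dict_key(sample_tf_name) == "model.encoder.layer_norm.weight"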
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    # nargs="?" lets the positional argument be omitted so the default of None can apply
    parser.add_argument("save_dir", nargs="?", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
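# Shape sketch (illustration): in NHWC, ZeroPadding2D(kernel_size // 2) followed by a
# VALID convolution reproduces "same" padding, so with stride 1 a (batch, H, W, C) input
# maps to (batch, H, W, out_channels) before batch norm and the activation are applied.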
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """
    RegNet Embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
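# Squeeze-and-excitation sketch: the pooler reduces (batch, H, W, C) to (batch, 1, 1, C),
# a ReLU 1x1 conv squeezes to `reduced_channels`, a sigmoid 1x1 conv restores C, and the
# resulting per-channel weights rescale the input (hidden_state * pooled).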
class TFRegNetXLayer(tf.keras.layers.Layer):
    """
    RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """
    A RegNet stage composed by stacked layers.
    """

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
SCREAMING_SNAKE_CASE : Optional[int] = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE : Union[str, Any] = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
"""simple docstring"""
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError

# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None
def lowercase ( ) ->str:
"""simple docstring"""
global _logger
__snake_case : Union[str, Any] = _logger or logging.getLogger(__name__ )
return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
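# Why the proxy (sketch): `BaseFileLock.acquire()` below returns this object so both
# call styles work; `__exit__` releases exactly one acquisition.
#
#     lock.acquire()
#     try:
#         ...
#     finally:
#         lock.release()
#
#     with lock.acquire(timeout=5):
#         ...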
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    # Platform dependent locking
    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
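# Minimal usage sketch (illustration only; the path below is hypothetical):
def _filelock_demo():
    lock = FileLock("/tmp/demo.txt.lock", timeout=1)
    with lock:
        # the lock is held here; a second acquisition from the same object
        # only increments the internal counter instead of blocking
        with lock:
            pass
    # fully released once the outermost context exits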
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
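# Usage sketch (illustration): build a small config and override a few defaults.
def _roc_bert_config_demo():
    config = RoCBertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
    assert config.model_type == "roc_bert"
    return config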
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch):
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
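# Shape sketch (illustration): for a protein dict whose "aatype" tensor has shape
# (num_res,), make_atom14_masks adds
#   "residx_atom14_to_atom37": (num_res, 14) long indices into the atom37 layout,
#   "residx_atom37_to_atom14": (num_res, 37) long indices into the atom14 layout,
#   "atom14_atom_exists":      (num_res, 14) float mask,
#   "atom37_atom_exists":      (num_res, 37) float mask.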
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
__lowerCamelCase : Dict = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
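# Why a factory (sketch): dataclasses reject mutable defaults such as `[]`, so
# `list_field` hides the list behind `default_factory`. A hypothetical example:
#
#     @dataclass
#     class DemoArgs:
#         tags: List[str] = list_field(default=["a", "b"], metadata={"help": "demo"})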
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)

        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
from __future__ import annotations
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """
    Checks whether the digit `n` can be placed at (row, column): it must not already
    appear in that row, that column, or the enclosing 3x3 box.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """
    Finds an empty location (marked 0) so that we can assign a digit to that cell.
    """
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """
    Takes a partially filled-in grid and attempts to assign values to all unassigned
    locations so that the Sudoku constraints hold, using backtracking.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # undo the assignment and backtrack

    return None


def print_solution(grid: Matrix) -> None:
    """
    Prints the grid in the form of a 9x9 matrix.
    """
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in member variable.
    """

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def A ( a_ ) -> List[str]:
__UpperCamelCase : Tuple =val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F'invalid truth value {val!r}' )
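# e.g. strtobool("YES") -> 1, strtobool("off") -> 0; any other string raises ValueError.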
def is_tensor(x):
    """Tests if `x` is a `torch.Tensor`, `tf.Tensor`, jax array or `np.ndarray`."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor. Safe to call even when torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device. Safe to call even when torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype. Safe to call even when torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor. Safe to call even when tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic (non-eager) tensor. Safe even when tensorflow is not installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a jax tensor. Safe to call even when jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
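# A minimal sketch of the conversion helpers above (assuming only NumPy is
# installed; torch/tf/jax tensors follow the analogous branches):
#     import numpy as np
#     to_py_obj(np.arange(3))           # -> [0, 1, 2]
#     to_numpy([[1, 2], [3, 4]]).shape  # -> (2, 2)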
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Supports indexing by integer or slice (like a tuple)
    or by string (like a dictionary), ignoring attributes that are `None`.
    """

    def __post_init__(self):
        class_fields = fields(self)
        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value).")
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )
class PaddingStrategy(ExplicitEnum):
    """Possible values for the `padding` argument. Useful for tab-completion in an IDE."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"
class TensorType(ExplicitEnum):
    """Possible values for the `return_tensors` argument. Useful for tab-completion in an IDE."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper around `contextlib.ExitStack` that enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check whether a given model class can return loss (a `return_loss` parameter defaulting to True)."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels(model_class):
    """Find the label argument names used by a given model class."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def A ( a_ ,a_ = "" ,a_ = "." ) -> Any:
def _flatten_dict(a_ ,a_="" ,a_="." ):
for k, v in d.items():
__UpperCamelCase : List[Any] =str(__A ) + delimiter + str(__A ) if parent_key else k
if v and isinstance(__A ,__A ):
yield from flatten_dict(__A ,__A ,delimiter=__A ).items()
else:
yield key, v
return dict(_flatten_dict(__A ,__A ,__A ) )
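# Example (a sketch): nested keys are joined with the delimiter.
#     flatten_dict({"a": {"b": 1}, "c": 2})  # -> {"a.b": 1, "c": 2}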
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic equivalent of `numpy.transpose`."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic equivalent of `numpy.reshape`."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic equivalent of `numpy.squeeze`."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic equivalent of `numpy.expand_dims`."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic equivalent of `numpy.size`."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
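# Usage sketch for the framework-agnostic array helpers above (NumPy input
# shown; torch/tf/jax inputs dispatch to the matching branch):
#     import numpy as np
#     x = np.zeros((2, 3))
#     transpose(x).shape        # (3, 2)
#     reshape(x, (3, 2)).shape  # (3, 2)
#     expand_dims(x, 0).shape   # (1, 2, 3)
#     tensor_size(x)            # 6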
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map
def infer_framework(model_class):
    """
    Infers the framework of a given model class without using isinstance(), because we cannot guarantee that
    the relevant classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
| 71
|
'''simple docstring'''
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with the first `accuracy` terms of its Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta to [0, 2*pi) so the truncated series stays accurate.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with the first `accuracy` terms of its Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
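# The two functions implement the standard Maclaurin expansions,
#     sin(theta) = sum_{r>=0} (-1)^r * theta^(2r+1) / (2r+1)!
#     cos(theta) = sum_{r>=0} (-1)^r * theta^(2r)   / (2r)!
# truncated after `accuracy` terms, with theta first reduced modulo 2*pi.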
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5))
| 80
| 0
|
'''simple docstring'''
def solution():
    """Return a*b*c for the Pythagorean triplet (a, b, c) with a + b + c = 1000."""
    return [
        a * b * (1_0_0_0 - a - b)
        for a in range(1, 9_9_9)
        for b in range(a, 9_9_9)
        if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
    ][0]
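# Project Euler problem 9: there is exactly one Pythagorean triplet with
# a + b + c = 1000 (namely 200, 375, 425), so the product above is 31875000.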
if __name__ == "__main__":
print(f"""{solution() = }""")
| 9
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device), decoder_input_ids=inputs["decoder_input_ids"].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 9
| 1
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
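# Note: despite its name, this returns cosine *similarities* as an
# (n_images, n_concepts) matrix; e.g. cosine_distance(torch.eye(2), torch.eye(2))
# is the 2x2 identity matrix.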
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
| 193
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold,)

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 193
| 1
|
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
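# Example (a sketch; ties between equally long subsequences may resolve either way):
#     longest_subsequence([1, 3, 2, 4])  # -> [1, 2, 4]
#     longest_subsequence([1, 2, 3])     # -> [1, 2, 3]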
if __name__ == "__main__":
import doctest
doctest.testmod()
| 180
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3],
                 layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None,
                 out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 180
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__A = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["pixel_values"]
def __init__(self : Optional[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 255 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : bool = True , **UpperCAmelCase_ : Tuple , ) ->None:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =size if size is not None else {"shortest_edge": 224}
lowerCamelCase__: Optional[Any] =get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =crop_size if crop_size is not None else {"height": 224, "width": 224}
lowerCamelCase__: Tuple =get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ , param_name="crop_size")
lowerCamelCase__: Union[str, Any] =do_resize
lowerCamelCase__: Union[str, Any] =size
lowerCamelCase__: Any =resample
lowerCamelCase__: List[str] =do_center_crop
lowerCamelCase__: List[Any] =crop_size
lowerCamelCase__: List[str] =do_rescale
lowerCamelCase__: List[str] =rescale_factor
lowerCamelCase__: str =do_normalize
lowerCamelCase__: Tuple =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCamelCase__: List[Any] =image_std if image_std is not None else OPENAI_CLIP_STD
lowerCamelCase__: int =do_convert_rgb
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[Any] , ) ->np.ndarray:
'''simple docstring'''
lowerCamelCase__: List[str] =get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
lowerCamelCase__: Optional[int] =get_resize_output_image_size(UpperCAmelCase_ , size=size["shortest_edge"] , default_to_square=UpperCAmelCase_)
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ) ->np.ndarray:
'''simple docstring'''
lowerCamelCase__: int =get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size["height"], size["width"]) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ) ->int:
'''simple docstring'''
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ) ->np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : int = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : float = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_ : List[str] , ) ->PIL.Image.Image:
'''simple docstring'''
lowerCamelCase__: Tuple =do_resize if do_resize is not None else self.do_resize
lowerCamelCase__: List[Any] =size if size is not None else self.size
lowerCamelCase__: List[Any] =get_size_dict(UpperCAmelCase_ , param_name="size" , default_to_square=UpperCAmelCase_)
lowerCamelCase__: Any =resample if resample is not None else self.resample
lowerCamelCase__: Any =do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase__: Any =crop_size if crop_size is not None else self.crop_size
lowerCamelCase__: Union[str, Any] =get_size_dict(UpperCAmelCase_ , param_name="crop_size" , default_to_square=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__: List[str] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__: int =do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__: Optional[Any] =image_mean if image_mean is not None else self.image_mean
lowerCamelCase__: Optional[int] =image_std if image_std is not None else self.image_std
lowerCamelCase__: Dict =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase__: Dict =make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase__: List[Any] =[convert_to_rgb(UpperCAmelCase_) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase__: str =[to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
lowerCamelCase__: Union[str, Any] =[self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_) for image in images]
if do_center_crop:
lowerCamelCase__: Optional[Any] =[self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_) for image in images]
if do_rescale:
lowerCamelCase__: Any =[self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_) for image in images]
if do_normalize:
lowerCamelCase__: List[str] =[self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_) for image in images]
lowerCamelCase__: List[str] =[to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
lowerCamelCase__: str ={"pixel_values": images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
| 10
|
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 145
| 0
|
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Two parallel `Transformer2DModel`s whose residual outputs are mixed for dual-conditioned inference."""

    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88,
                 in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0,
                 norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None,
                 attention_bias: bool = False, sample_size: Optional[int] = None,
                 num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu",
                 num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim,
                    in_channels=in_channels, num_layers=num_layers, dropout=dropout,
                    norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias, sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds, activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ])

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None,
                cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
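# The blend in `forward` is a convex combination of the two branch residuals:
#     out = mix_ratio * (enc_0 - x) + (1 - mix_ratio) * (enc_1 - x) + x
# where x is the input hidden state and enc_i is the i-th transformer's output.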
| 355
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class __snake_case ( unittest.TestCase ):
    def test_module_spec(self):
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __snake_case ( unittest.TestCase ):
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def test_context_managers_no_context(self, mock_stdout):
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def test_context_managers_one_context(self, mock_stdout):
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def test_context_managers_two_context(self, mock_stdout):
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels should also work on subclasses
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])
@require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])
@require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 204
| 0
|
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
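# The acceptance rule above is the Metropolis criterion: a worsening move
# (change <= 0) is accepted with probability
#     P = e^(change / current_temp)
# so high temperatures accept almost anything and cooling makes the search greedy.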
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
| 49
|
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    """Reverse each word that is longer than four characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 22
| 0
|
'''simple docstring'''
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
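# Example: every value tied for the highest count is returned, sorted.
#     mode([2, 3, 4, 5, 3, 4])  # -> [3, 4]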
if __name__ == "__main__":
import doctest
doctest.testmod()
| 332
|
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial() -> None:
    """Plot the data points and the fitted polynomial regression curve."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 332
| 1
|
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
UpperCAmelCase : Any = Process('''P1''', 0, 53)
UpperCAmelCase : Dict = Process('''P2''', 0, 17)
UpperCAmelCase : List[str] = Process('''P3''', 0, 68)
UpperCAmelCase : Optional[int] = Process('''P4''', 0, 24)
UpperCAmelCase : List[Any] = 3
UpperCAmelCase : int = [17, 25]
UpperCAmelCase : Any = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
UpperCAmelCase : List[Any] = Process('''P1''', 0, 53)
UpperCAmelCase : Optional[Any] = Process('''P2''', 0, 17)
UpperCAmelCase : List[str] = Process('''P3''', 0, 68)
UpperCAmelCase : Any = Process('''P4''', 0, 24)
UpperCAmelCase : Dict = 3
UpperCAmelCase : List[str] = [17, 25]
UpperCAmelCase : Dict = deque([Pa, Pa, Pa, Pa])
UpperCAmelCase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
UpperCAmelCase : List[str] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
F"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 280
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements (0 for an empty list)."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
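# Example: pick numbers maximizing the sum without taking two neighbours.
#     maximum_non_adjacent_sum([1, 2, 3])              # -> 4   (1 + 3)
#     maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])  # -> 18  (5 + 7 + 6)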
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280
| 1
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 358
|
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    '''Calculate per-process waiting times, always running the shortest available job next.'''
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # a process whose arrival time has passed
    # and that has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> list[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [0] * no_of_processes
for i in range(UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
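    # Editor-added sanity check (computed by hand): with all arrivals at 0, SJF
    # runs the jobs in ascending burst order (2, 3, 5, 7), so the waiting times
    # are [0, 5, 2, 10] and the turnaround times are [2, 10, 5, 17].
    assert waiting_time == [0, 5, 2, 10]
    assert turn_around_time == [2, 10, 5, 17]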
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 169
| 0
|
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 252
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 223
| 0
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources, per resource type."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: claim vector minus allocated sums."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process remaining need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process's original index to its need vector."""
        return {self.__need().index(need): need for need in self.__need()}
    def main(self, **kwargs) -> None:
        """Run the Banker's algorithm and print the safe execution order."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align and display the data tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
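    # Editor-added usage sketch (not in the original file): run the safety check
    # on the module-level test tables. Any truthy keyword makes main() dump the
    # data tables first, so the kwarg name here is illustrative.
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)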
| 0
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 0
| 1
|
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 242
|
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to its representation in the given base (2..36)."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
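    # Editor-added example: 255 in base 16 is "FF".
    assert decimal_to_any(255, 16) == "FF"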
for base in range(2, 3_7):
for num in range(1_0_0_0):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 242
| 1
|
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count the n-digit positive integers that are also an nth power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
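    # Editor-added check (Project Euler 63's published answer): 7**5 = 16807 is
    # a 5-digit fifth power, and there are 49 such numbers in total.
    assert solution(10, 22) == 49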
| 318
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Combines a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
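
# Editor-added usage sketch (not part of the original file; assumes the public
# "microsoft/speecht5_tts" checkpoint is available):
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello world", return_tensors="pt")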
| 318
| 1
|
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Given a sorted list, return the indices of two numbers summing to target."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 100
|
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status


def sieve_er(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def greatest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def is_even(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"

    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"

    return number % 2 != 0


def goldbach(number):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def gcd(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1


def kg_v(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def get_prime(n):
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1, p_number_2):
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans
def is_perfect_number(number):
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
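

if __name__ == "__main__":
    # Editor-added smoke tests (values computed by hand; not part of the
    # original module):
    assert is_prime(97) and not is_prime(96)
    assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]
    assert gcd(12, 18) == 6
    assert kg_v(4, 6) == 12  # least common multiple
    assert fib(5) == 8  # this fib starts at fib(0) == 1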
| 262
| 0
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
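
# Editor-added usage sketch (shapes chosen for illustration): pad() grows each
# spatial dimension up to the next multiple of `pad_size`, so a 17x30 image with
# pad_size=8 becomes 24x32:
#   import numpy as np
#   processor = Swin2SRImageProcessor()
#   out = processor.pad(np.zeros((3, 17, 30)), size=8)  # -> shape (3, 24, 32)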
| 12
|
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
lowercase__ = """zero2"""
lowercase__ = """zero3"""
lowercase__ = [ZEROa, ZEROa]
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
_lowerCamelCase : List[str] = parameterized.to_safe_name('_'.join(str(lowercase__ ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
lowercase__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
    ):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
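
# Editor-added note: for a distributed run on a 2-GPU machine, get_launcher(True)
# returns ["deepspeed", "--num_nodes", "1", "--num_gpus", "2"].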
| 12
| 1
|
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: all primes below ``max_number``."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count composites below ``max_number`` with exactly two prime factors."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(F"{solution() = }")
| 136
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Tuple = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 136
| 1
|
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073), (0.26_862_954, 0.26_130_258, 0.27_577_711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image


def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2_110_687_494_277_954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45_698_845_386_505_127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 368
|
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of ``n`` in ascending order (with multiplicity)."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
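    # Editor-added examples:
    assert prime_factors(100) == [2, 2, 5, 5]
    assert prime_factors(97) == [97]
    assert prime_factors(1) == []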
| 180
| 0
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
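
# Editor-added sanity sketch (not in the original pipeline): encoding then
# decoding is the identity on 8-bit-quantized inputs, e.g.
#   x = torch.rand(1, 3, 4, 4)
#   assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), (x * 255).int() / 255)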
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """DDPM step that clips "predicted x_0" to +/- bit_scale instead of +/- 1."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # Patch the scheduler with the bit-aware step above. We bind it via
        # `__get__` so that `self` inside the step refers to the scheduler, and
        # mirror `bit_scale` onto the scheduler since the step reads `self.bit_scale`.
        scheduler.bit_scale = bit_scale
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        ).__get__(scheduler)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
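# Usage sketch (checkpoint path is hypothetical; bit diffusion needs a UNet
# trained on the 24-channel bit representation of RGB images, i.e. 3 x 8 bits):
#   unet = UNet2DConditionModel.from_pretrained("path/to/bit-diffusion-unet")
#   pipe = BitDiffusion(unet, DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]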
| 44
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 44
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''spiece.model'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
__UpperCAmelCase = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
__UpperCAmelCase = 3
__UpperCAmelCase = 4
class lowerCamelCase__ ( _a ):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = '''left'''
def __init__( self : Dict , _a : List[Any] , _a : Any=False , _a : int=True , _a : Union[str, Any]=False , _a : Dict="<s>" , _a : str="</s>" , _a : Optional[int]="<unk>" , _a : Union[str, Any]="<sep>" , _a : List[Any]="<pad>" , _a : Optional[Any]="<cls>" , _a : str="<mask>" , _a : Any=["<eop>", "<eod>"] , _a : Optional[Dict[str, Any]] = None , **_a : Optional[int] , ):
# Mask token behave like a normal word, i.e. include the space before it
a__: Dict =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
a__: Optional[int] ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
a__: Dict =3
a__: Tuple =do_lower_case
a__: int =remove_space
a__: List[Any] =keep_accents
a__: List[str] =vocab_file
a__: Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def _lowerCamelCase ( self : Any ):
return len(self.sp_model )
def _lowerCamelCase ( self : List[Any] ):
a__: Dict ={self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
a__: Dict =self.__dict__.copy()
a__: List[Any] =None
return state
def __setstate__( self : Optional[Any] , _a : Tuple ):
a__: List[Any] =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a__: List[str] ={}
a__: int =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self : Dict , _a : str ):
if self.remove_space:
a__: Optional[int] =" ".join(inputs.strip().split() )
else:
a__: Optional[int] =inputs
a__: Dict =outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
a__: Optional[int] =unicodedata.normalize("NFKD" , _a )
a__: int ="".join([c for c in outputs if not unicodedata.combining(_a )] )
if self.do_lower_case:
a__: Dict =outputs.lower()
return outputs
def _lowerCamelCase ( self : List[Any] , _a : str ):
a__: Dict =self.preprocess_text(_a )
a__: Dict =self.sp_model.encode(_a , out_type=_a )
a__: str =[]
for piece in pieces:
if len(_a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
a__: Optional[Any] =self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
a__: Optional[int] =cur_pieces[1:]
else:
a__: Tuple =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_a )
else:
new_pieces.append(_a )
return new_pieces
def _lowerCamelCase ( self : Dict , _a : Dict ):
return self.sp_model.PieceToId(_a )
def _lowerCamelCase ( self : Dict , _a : Optional[Any] ):
return self.sp_model.IdToPiece(_a )
def _lowerCamelCase ( self : Optional[Any] , _a : Tuple ):
a__: Tuple ="".join(_a ).replace(_a , " " ).strip()
return out_string
def _lowerCamelCase ( self : Optional[int] , _a : List[int] , _a : bool = False , _a : bool = None , _a : bool = True , **_a : Union[str, Any] , ):
a__: Optional[int] =kwargs.pop("use_source_tokenizer" , _a )
a__: Any =self.convert_ids_to_tokens(_a , skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
a__: List[str] =[]
a__: Any =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
a__: List[str] =[]
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
a__: Union[str, Any] ="".join(_a )
a__: List[Any] =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
a__: Optional[int] =self.clean_up_tokenization(_a )
return clean_text
else:
return text
def _lowerCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ):
a__: Dict =[self.sep_token_id]
a__: Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self : Dict , _a : List[int] , _a : Optional[List[int]] = None , _a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is not None:
return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1, 1]
return ([0] * len(_a )) + [1, 1]
def _lowerCamelCase ( self : Dict , _a : List[int] , _a : Optional[List[int]] = None ):
a__: Any =[self.sep_token_id]
a__: List[Any] =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCamelCase ( self : List[str] , _a : str , _a : Optional[str] = None ):
if not os.path.isdir(_a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
a__: List[Any] =os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , "wb" ) as fi:
a__: Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 367
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '''▁'''
__UpperCAmelCase = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
__UpperCAmelCase = {
'''facebook/m2m100_418M''': 10_24,
}
# fmt: off
__UpperCAmelCase = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class lowerCamelCase__ ( _a ):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = ['''input_ids''', '''attention_mask''']
_lowerCAmelCase = []
_lowerCAmelCase = []
def __init__( self : Dict , _a : Tuple , _a : List[Any] , _a : Tuple=None , _a : Dict=None , _a : Any="<s>" , _a : Union[str, Any]="</s>" , _a : str="</s>" , _a : int="<pad>" , _a : str="<unk>" , _a : Tuple="m2m100" , _a : Optional[Dict[str, Any]] = None , _a : str=8 , **_a : str , ):
a__: str ={} if sp_model_kwargs is None else sp_model_kwargs
a__: Optional[int] =language_codes
a__: Dict =FAIRSEQ_LANGUAGE_CODES[language_codes]
a__: Tuple ={lang_code: F"__{lang_code}__" for lang_code in fairseq_language_code}
a__: Any =kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(_a )
for lang_code in fairseq_language_code
if self.get_lang_token(_a ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_a , tgt_lang=_a , bos_token=_a , eos_token=_a , sep_token=_a , unk_token=_a , pad_token=_a , language_codes=_a , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=_a , **_a , )
a__: Optional[Any] =vocab_file
a__: Tuple =load_json(_a )
a__: Any ={v: k for k, v in self.encoder.items()}
a__: List[str] =spm_file
a__: str =load_spm(_a , self.sp_model_kwargs )
a__: Any =len(self.encoder )
a__: Dict ={
self.get_lang_token(_a ): self.encoder_size + i for i, lang_code in enumerate(_a )
}
a__: List[Any] ={lang_code: self.encoder_size + i for i, lang_code in enumerate(_a )}
a__: Dict ={v: k for k, v in self.lang_token_to_id.items()}
a__: List[str] =src_lang if src_lang is not None else "en"
a__: Any =tgt_lang
a__: Tuple =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
a__: str =num_madeup_words
@property
def _lowerCamelCase ( self : int ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _lowerCamelCase ( self : List[str] ):
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self : Tuple , _a : str ):
a__: Optional[int] =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowerCamelCase ( self : int , _a : str ):
return self.sp_model.encode(_a , out_type=_a )
def _lowerCamelCase ( self : Tuple , _a : int ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(_a , self.encoder[self.unk_token] )
def _lowerCamelCase ( self : int , _a : int ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(_a , self.unk_token )
def _lowerCamelCase ( self : Dict , _a : List[str] ):
a__: str =[]
a__: Union[str, Any] =""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_a ) + token
a__: Dict =[]
else:
current_sub_tokens.append(_a )
out_string += self.sp_model.decode(_a )
return out_string.strip()
def _lowerCamelCase ( self : str , _a : List[int] , _a : Optional[List[int]] = None , _a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
a__: Union[str, Any] =[1] * len(self.prefix_tokens )
a__: Optional[Any] =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_a )) + suffix_ones
return prefix_ones + ([0] * len(_a )) + ([0] * len(_a )) + suffix_ones
def _lowerCamelCase ( self : Optional[int] , _a : List[int] , _a : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self : Dict ):
a__: List[Any] ={self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
a__: Dict =self.__dict__.copy()
a__: Union[str, Any] =None
return state
def __setstate__( self : Tuple , _a : Dict ):
a__: str =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a__: Optional[Any] ={}
a__: Optional[Any] =load_spm(self.spm_file , self.sp_model_kwargs )
def _lowerCamelCase ( self : Any , _a : str , _a : Optional[str] = None ):
a__: Union[str, Any] =Path(_a )
if not save_dir.is_dir():
raise OSError(F"{save_directory} should be a directory" )
a__: Union[str, Any] =save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
a__: Optional[int] =save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , _a )
if os.path.abspath(self.spm_file ) != os.path.abspath(_a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _a )
elif not os.path.isfile(self.spm_file ):
with open(_a , "wb" ) as fi:
a__: str =self.sp_model.serialized_model_proto()
fi.write(_a )
return (str(_a ), str(_a ))
def _lowerCamelCase ( self : List[str] , _a : List[str] , _a : str = "en" , _a : Optional[List[str]] = None , _a : str = "ro" , **_a : Optional[Any] , ):
a__: Tuple =src_lang
a__: int =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(_a , _a , **_a )
def _lowerCamelCase ( self : List[str] , _a : Dict , _a : Optional[str] , _a : Optional[str] , **_a : Optional[Any] ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
a__: Dict =src_lang
a__: Optional[int] =self(_a , add_special_tokens=_a , **_a )
a__: Union[str, Any] =self.get_lang_id(_a )
a__: Tuple =tgt_lang_id
return inputs
def _lowerCamelCase ( self : List[Any] ):
self.set_src_lang_special_tokens(self.src_lang )
def _lowerCamelCase ( self : List[Any] ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowerCamelCase ( self : Union[str, Any] , _a : str ):
a__: Tuple =self.get_lang_token(_a )
a__: Optional[int] =self.lang_token_to_id[lang_token]
a__: Any =[self.cur_lang_id]
a__: Optional[Any] =[self.eos_token_id]
def _lowerCamelCase ( self : str , _a : str ):
a__: List[str] =self.get_lang_token(_a )
a__: Optional[Any] =self.lang_token_to_id[lang_token]
a__: Optional[int] =[self.cur_lang_id]
a__: Dict =[self.eos_token_id]
def _lowerCamelCase ( self : Any , _a : str ):
return self.lang_code_to_token[lang]
def _lowerCamelCase ( self : int , _a : str ):
a__: int =self.get_lang_token(_a )
return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
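# Usage sketch (file names are hypothetical):
#   sp = load_spm("sentencepiece.bpe.model", sp_model_kwargs={})
#   vocab = load_json("vocab.json")
#   save_json(vocab, "vocab.copy.json")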
| 42
| 0
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['▁he', 'll', 'o'])
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
a__: List[str] = {'input_ids': [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # `a__` above holds the expected encoding dict built under `fmt: off`.
        self.tokenizer_integration_test_util(
            expected_encoding=a__, model_name="xlnet-base-cased", revision="c841166438c31ec7ca9a106dee7bb312b73ae511"
        )
| 290
|
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    """
    Count how many n below `limit` admit exactly ten solutions of
    x**2 - y**2 - z**2 = n with x, y, z in arithmetic progression
    (Project Euler problem 135).
    """
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d, also 4d<a

    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
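# Derivation of the checks above: write the progression as x = y + d, z = y - d.
# Then x**2 - y**2 - z**2 = y * (4*d - y), so n must be a multiple of y
# (= first_term), and first_term + n / first_term = y + (4*d - y) = 4*d, which
# is why `common_difference` must be divisible by 4 before dividing it down to d.
# Positivity of z and of n then gives y > d and y < 4*d, the two inequalities.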
if __name__ == "__main__":
print(f"{solution() = }")
| 290
| 1
|
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
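# Design note: with a seeded generator the full denoising loop is deterministic,
# so the hard-coded sums/means act as golden values that catch numerical drift
# in the scheduler implementation across refactors.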
| 313
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # the projection head is used during self-supervised MSN pre-training,
    # but is not needed for downstream tasks
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
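    # Example invocation (script name and output path are hypothetical):
    #   python convert_vit_msn_to_pytorch.py \
    #       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
    #       --pytorch_dump_folder_path ./vit-msn-small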
| 313
| 1
|
'''simple docstring'''
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list in place by alternating backward and forward bubble passes.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):  # backward pass: drag the minimum left
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):  # forward pass: push the maximum right
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
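# Complexity note: worst case O(n**2) like bubble sort, but the bidirectional
# sweep plus the `swapped` early exit means nearly-sorted inputs finish in
# close to O(n).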
| 250
|
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` set to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` cleared to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` inverted."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
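# Worked example: for number = 0b1010 (10):
#   set_bit(10, 0)   -> 0b1011 (11)
#   clear_bit(10, 1) -> 0b1000 (8)
#   flip_bit(10, 3)  -> 0b0010 (2)
#   is_bit_set(10, 3) -> True, get_bit(10, 0) -> 0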
if __name__ == "__main__":
import doctest
doctest.testmod()
| 81
| 0
|
"""simple docstring"""
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line: str) -> str:
    """Return the leading whitespace of `line` (empty string if none)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks at the given indentation level, optionally bounded by start/end prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap a key function so that sorting ignores casing and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort `objects` isort-style: constants first, classes second, functions last."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    """Sort the object names inside a single `_import_structure` statement."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` entries of one init file; return True if it would change."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every __init__.py under PATH_TO_TRANSFORMERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
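# Typical usage from the repository root (the script path is hypothetical):
#   python utils/custom_init_isort.py --check_only   # CI-style check, raises on drift
#   python utils/custom_init_isort.py                # rewrite the init files in place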
| 350
|
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 186
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE__ ( PipelineTool ):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    inputs = ["text"]
    outputs = ["text"]
    def encode( self , text ):
        """simple docstring"""
        return self.pre_processor(text , return_tensors="""pt""" , truncation=True)
    def forward( self , inputs ):
        """simple docstring"""
        return self.model.generate(**inputs)[0]
    def decode( self , outputs ):
        """simple docstring"""
        return self.pre_processor.decode(outputs , skip_special_tokens=True , clean_up_tokenization_spaces=True)
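# A hedged usage sketch: `PipelineTool.__call__` chains the three methods defined
# above (encode -> forward -> decode), so invoking the tool on raw text runs the
# whole tokenize/generate/detokenize pipeline. Variable names are illustrative.
# summarizer = <the tool class above>()
# summary = summarizer("Some long English text that should be compressed into a few sentences.")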
| 136
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure)
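# The `_LazyModule` pattern above keeps `import transformers.models.plbart` cheap:
# the heavy torch/sentencepiece submodules are only imported when one of the names
# registered in `_import_structure` is first accessed. A hedged illustration:
# from transformers.models import plbart   # fast, nothing heavy loaded yet
# plbart.PLBartConfig                      # first attribute access triggers the real import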
| 136
| 1
|
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ['''XLA_PYTHON_CLIENT_MEM_FRACTION'''] = '''0.12'''  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor( shape , vocab_size , rng=None ) ->Tuple:
    """simple docstring"""
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    output = np.array(values , dtype=jnp.int32 ).reshape(shape )
    return output
def random_attention_mask( shape , rng=None ) ->Any:
    """simple docstring"""
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class _UpperCAmelCase :
'''simple docstring'''
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self ):
        '''simple docstring'''
        config , inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs['''input_ids'''].shape[-1] // 2
        input_ids = inputs['''input_ids'''][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids )
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self ):
        '''simple docstring'''
        config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0
        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config )
            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers , pt_model_class_name )
            pt_model = pt_model_class(config ).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model , flax_model.params )
            flax_generation_outputs = flax_model.generate(input_ids ).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids , dtype=torch.long ) )
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
    def test_greedy_generate(self ):
        '''simple docstring'''
        config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_sample_generate(self ):
        '''simple docstring'''
        config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate(self ):
        '''simple docstring'''
        config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate_num_return_sequences(self ):
        '''simple docstring'''
        config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
    def test_sample_generate_logits_warper(self ):
        '''simple docstring'''
        config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_greedy_generate_logits_warper(self ):
        '''simple docstring'''
        config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate_logits_warper(self ):
        '''simple docstring'''
        config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_greedy_generate_attn_mask(self ):
        '''simple docstring'''
        config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0 )
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_sample_generate_attn_mask(self ):
        '''simple docstring'''
        config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0 )
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate_attn_mask(self ):
        '''simple docstring'''
        config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0 )
        config.num_beams = 2
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config )
            generation_outputs = model.generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate = jit(model.generate )
            jit_generation_outputs = jit_generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_validate_generation_inputs(self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
        model = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        text = '''Hello world'''
        input_ids = tokenizer(text , return_tensors='''np''' ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError , '''do_samples''' ):
            model.generate(input_ids , do_samples=True )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError , '''foo''' ):
            fake_model_kwargs = {'''foo''': '''bar'''}
            model.generate(input_ids , **fake_model_kwargs )
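# A minimal, self-contained illustration of the `ids_tensor` contract the tests
# above rely on, written with plain numpy so it runs without jax. The names here
# are illustrative, not part of the test suite.
import numpy as _np
import random as _random
def _ids_tensor_demo(shape, vocab_size, seed=0):
    rng = _random.Random(seed)
    flat = [rng.randint(0, vocab_size - 1) for _ in range(int(_np.prod(shape)))]
    return _np.array(flat, dtype=_np.int32).reshape(shape)
# _ids_tensor_demo((2, 8), vocab_size=99) -> a (2, 8) int32 array of token ids in [0, 99)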
| 24
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def check_results_dict_not_empty(self , results ):
        '''simple docstring'''
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
                result = model_result['''result'''][batch_size][sequence_length]
                self.assertIsNotNone(result )
    def test_inference_no_configs_eager(self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_only_pretrain(self ):
        '''simple docstring'''
        MODEL_ID = '''sgugger/tiny-distilbert-classification'''
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , only_pretrain_model=True , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_graph(self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_with_configs_eager(self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_with_configs_graph(self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_no_configs(self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_train_with_configs(self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_inference_encoder_decoder_with_configs(self ):
        '''simple docstring'''
        MODEL_ID = '''patrickvonplaten/t5-tiny-random'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
    def test_inference_no_configs_xla(self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , use_xla=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_save_csv_files(self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=True , save_to_csv=True , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(tmp_dir , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(tmp_dir , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(tmp_dir , '''env.csv''' ) , multi_process=False , )
            benchmark = TensorFlowBenchmark(benchmark_args )
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir , '''inf_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , '''inf_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , '''env.csv''' ) ).exists() )
    def test_trace_memory(self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        def _check_summary_is_not_empty(summary ):
            self.assertTrue(hasattr(summary , '''sequential''' ) )
            self.assertTrue(hasattr(summary , '''cumulative''' ) )
            self.assertTrue(hasattr(summary , '''current''' ) )
            self.assertTrue(hasattr(summary , '''total''' ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=True , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(tmp_dir , '''log.txt''' ) , log_print=True , trace_memory_line_by_line=True , eager_mode=True , multi_process=False , )
            benchmark = TensorFlowBenchmark(benchmark_args )
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            self.assertTrue(Path(os.path.join(tmp_dir , '''log.txt''' ) ).exists() )
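# Outside unittest, the same benchmark can be driven directly; a hedged sketch
# mirroring the flag values used in the tests above (the model id is just the
# tiny fixture checkpoint):
# benchmark_args = TensorFlowBenchmarkArguments(
#     models=['sshleifer/tiny-gpt2'], training=False, inference=True,
#     sequence_lengths=[8], batch_sizes=[1], multi_process=False)
# results = TensorFlowBenchmark(benchmark_args).run()
# results.time_inference_result   # nested dict: model -> 'result' -> batch_size -> seq_len -> seconds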
| 24
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
def get_deta_config( model_name ) -> DetaConfig:
    backbone_config = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = '''huggingface/label-files'''
    if "o365" in model_name:
        num_labels = 366
        filename = '''object365-id2label.json'''
    else:
        num_labels = 91
        filename = '''coco-detection-id2label.json'''
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys( config ) -> list:
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ) -> None:
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v( state_dict , backbone_config ) -> None:
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            in_proj_bias = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim :, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v( state_dict , config ) -> None:
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
def prepare_img( ) -> Image.Image:
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deta_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ) -> None:
    config = get_deta_config(model_name )
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
    else:
        raise ValueError(f'''Model name {model_name} not supported''' )
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    # original state dict
    for name, param in state_dict.items():
        print(name , param.shape )
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
if "input_proj" in key:
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
# finally, create HuggingFace model and load state dict
model = DetaForObjectDetection(config )
model.load_state_dict(state_dict )
model.eval()
device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
model.to(device )
# load image processor
processor = DetaImageProcessor(format='''coco_detection''' )
# verify our conversion on image
img = prepare_img()
encoding = processor(images=img , return_tensors='''pt''' )
pixel_values = encoding['''pixel_values''']
outputs = model(pixel_values.to(device ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
    expected_logits = torch.tensor(
        [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
    expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
    expected_logits = torch.tensor(
        [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
    expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(device ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(device ) , atol=1E-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(f'''jozhang97/{model_name}''' )
processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
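# The q/k/v surgery in `read_in_decoder_q_k_v` above reduces to slicing a fused
# (3*h, h) in-projection matrix into three (h, h) blocks. A tiny self-contained
# torch demo of exactly that slicing (the size is arbitrary):
def _split_fused_qkv_demo(hidden_size=4):
    import torch
    in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # stacked [q; k; v]
    q = in_proj_weight[:hidden_size, :]
    k = in_proj_weight[hidden_size : hidden_size * 2, :]
    v = in_proj_weight[-hidden_size:, :]
    # the three slices partition the fused matrix exactly
    assert torch.equal(torch.cat([q, k, v], dim=0), in_proj_weight)
    return q, k, v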
| 270
|
def UpperCAmelCase_ ( number ) -> int:
    if not isinstance(number , int ) or number < 0:
        raise ValueError('''Input must be a non-negative integer''' )
    count = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
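# A quick cross-check of the Brian Kernighan loop above against Python's own
# binary formatting; both count set bits, the loop just does it in O(popcount):
def _popcount_via_bin(number: int) -> int:
    return bin(number).count("1")
# e.g. 0b1011 == 11 -> both implementations return 3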
| 156
| 0
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_snake_case = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_snake_case = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_snake_case = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
    def _info( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), )
    def _compute( self, predictions, references, min_len = 1, max_len = 4, ):
        '''simple docstring'''
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
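# The metric class above is a thin wrapper over nltk; the same number can be
# obtained directly (the token lists below are illustrative):
# from nltk.translate import gleu_score
# hypotheses = [['the', 'cat', 'sat']]
# list_of_references = [[['the', 'cat', 'sat', 'down']]]
# gleu_score.corpus_gleu(list_of_references=list_of_references,
#                        hypotheses=hypotheses, min_len=1, max_len=4)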
| 300
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( VideoMAEImageProcessor):
    def __init__( self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 300
| 1
|
def valid_connection( graph ,next_ver ,curr_ind ,path):
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)
def util_hamilton_cycle( graph ,path ,curr_ind):
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0 ,len(graph)):
        if valid_connection(graph ,next_ver ,curr_ind ,path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph ,path ,curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle( graph ,start_index = 0):
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph ,path ,1) else []
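# A worked example for the backtracking search above: the classic 5-vertex test
# graph as an adjacency matrix. Starting from vertex 0 the search returns a
# closed tour such as [0, 1, 2, 4, 3, 0]; an empty list means no cycle exists.
_demo_graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
# hamilton_cycle(_demo_graph) -> [0, 1, 2, 4, 3, 0]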
| 149
|
from ..utils import DummyObject, requires_backends
class _a ( metaclass=DummyObject):
    """simple docstring"""
    _backends = ["""flax""", """transformers"""]
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ["flax", "transformers"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["flax", "transformers"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["flax", "transformers"] )
class _a ( metaclass=DummyObject):
    """simple docstring"""
    _backends = ["""flax""", """transformers"""]
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ["flax", "transformers"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["flax", "transformers"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["flax", "transformers"] )
class _a ( metaclass=DummyObject):
    """simple docstring"""
    _backends = ["""flax""", """transformers"""]
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ["flax", "transformers"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["flax", "transformers"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["flax", "transformers"] )
class _a ( metaclass=DummyObject):
    """simple docstring"""
    _backends = ["""flax""", """transformers"""]
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ["flax", "transformers"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["flax", "transformers"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["flax", "transformers"] )
| 149
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mega'''] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
snake_case : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 353
|
from ...configuration_utils import PretrainedConfig
class _snake_case ( PretrainedConfig ):
    model_type = 'bert-generation'
    def __init__( self , vocab_size=5_0358 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=2 , eos_token_id=1 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
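# A hedged usage sketch: the defaults above correspond to the 24-layer,
# 1024-hidden bert-for-seq-generation checkpoints, so a bare instantiation of
# the config class already describes that size; only overrides need passing.
# cfg = <the config class above>(vocab_size=50358, num_hidden_layers=24)
# cfg.model_type  # 'bert-generation'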
| 281
| 0
|
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def lowercase ( ):
'''simple docstring'''
parser = argparse.ArgumentParser()
parser.add_argument("-f" )
args = parser.parse_args()
return args.f
class UpperCamelCase__ ( TestCasePlus ):
"""simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
    def run_and_check( self , args ):
        '''simple docstring'''
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , "run_glue_deebert.py" )
            with patch.object(sys , "argv" , args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value , 0.666 )
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train( self ):
'''simple docstring'''
UpperCAmelCase : str = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(UpperCAmelCase )
UpperCAmelCase : int = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(UpperCAmelCase )
| 311
|
'''simple docstring'''
import argparse
import copy
def generate_neighbours( path ):
    '''simple docstring'''
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
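# A small illustration of the edge-list format parsed above: each line is
# `node node distance`, and every edge is indexed under both endpoints
# (distances stay strings, exactly as the parser leaves them). For a file
#   a b 20
#   a c 18
#   b c 10
# the resulting dict is:
# {'a': [['b', '20'], ['c', '18']],
#  'b': [['a', '20'], ['c', '10']],
#  'c': [['a', '18'], ['b', '10']]}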
def generate_first_solution( path , dict_of_neighbours ):
    '''simple docstring'''
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 1_0000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 1_0000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    '''simple docstring'''
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    '''simple docstring'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
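# A minimal usage sketch. The input file format (one whitespace-separated
# "node_a node_b distance" triple per line) is an assumption inferred from how
# generate_neighbours() splits each line; the file name is a placeholder.
#
#   $ cat tabu_test_data.txt
#   a b 20
#   a c 18
#   b c 10
#   $ python tabu_search.py -f tabu_test_data.txt -i 4 -s 3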
| 311
| 1
|
def max_product_subarray(numbers: list[int]) -> int:
    """simple docstring"""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
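# A quick usage sketch: the expected value is the product of the contiguous
# subarray [6, -3, -10], which is easy to verify by hand.
if __name__ == "__main__":
    sample = [6, -3, -10, 0, 2]
    # 6 * -3 * -10 = 180 is the largest product of any contiguous subarray
    print(max_product_subarray(sample))  # 180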
| 266
|
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        """simple docstring"""

        # _fields is a specification for ctypes mapping the Win32 CONSOLE_CURSOR_INFO struct
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    """simple docstring"""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    """simple docstring"""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """simple docstring"""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
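# A minimal usage sketch: the context manager keeps the terminal cursor hidden
# for the duration of the block and restores it even if the block raises.
if __name__ == "__main__":
    import time

    with hide():
        time.sleep(2)  # the cursor stays hidden while "work" happens here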
| 266
| 1
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
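# A migration sketch: since DeiTFeatureExtractor only forwards to
# DeiTImageProcessor with a deprecation warning, new code can use the processor
# directly. The checkpoint name below is illustrative, not required.
#
#   from transformers import DeiTImageProcessor
#
#   image_processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#   inputs = image_processor(images=image, return_tensors="pt")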
| 218
|
"""simple docstring"""
def is_arithmetic_series(series):
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series):
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
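# A quick usage sketch for both helpers on a small arithmetic progression:
#
#   is_arithmetic_series([2, 4, 6])   -> True  (common difference 2)
#   is_arithmetic_series([2, 4, 7])   -> False
#   arithmetic_mean([2, 4, 6])        -> 4.0   (12 / 3)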
| 96
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/rembert": 256,
}
SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
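# A usage sketch for the fast tokenizer. Loading "google/rembert" (the
# checkpoint referenced in the maps above) requires a network connection.
#
#   tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
#   ids = tokenizer("Hello world").input_ids           # [CLS] ... [SEP] ids
#   text = tokenizer.decode(ids, skip_special_tokens=True)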
| 357
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor) -> List[int]:
    """simple docstring"""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits, axis=None, name=None) -> tf.Tensor:
    """simple docstring"""
    # The tiny shift is numerically a no-op but works around an XLA folding issue
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1) -> tf.Tensor:
    """simple docstring"""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1) -> tf.Tensor:
    """simple docstring"""
    # Replicates the behavior of torch.flatten in TF
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask) -> tf.Tensor:
    """simple docstring"""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor, embed_dim, tensor_name="input_ids") -> None:
    """simple docstring"""
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data) -> None:
    """simple docstring"""
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name) -> List[str]:
    """simple docstring"""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """simple docstring"""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
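# A small usage sketch of the helpers above (eager mode):
if __name__ == "__main__":
    logits = tf.constant([[1.0, 2.0, 3.0]])
    print(shape_list(logits))               # [1, 3] - fully static dims come back as ints
    print(stable_softmax(logits, axis=-1))  # ~[[0.09, 0.24, 0.67]]
    print(flatten(tf.zeros((2, 3, 4)), start_dim=1).shape)  # (2, 12)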
| 337
| 0
|
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    """simple docstring"""

    def __init__(self):
        '''simple docstring'''
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        '''simple docstring'''
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        '''simple docstring'''
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        '''simple docstring'''
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        '''simple docstring'''
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if below 1 a random count from 10 to 10000 is used
    def fill_graph_randomly(self, c=-1):
        '''simple docstring'''
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        '''simple docstring'''
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        '''simple docstring'''
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        '''simple docstring'''
        return len(self.graph[u])
    def topological_sort(self, s=-2):
        '''simple docstring'''
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        '''simple docstring'''
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        '''simple docstring'''
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, d=-1):
        '''simple docstring'''
        begin = time()
        self.dfs(s, d)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        '''simple docstring'''
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    """simple docstring"""

    def __init__(self):
        '''simple docstring'''
        self.graph = {}

    # adding vertices and edges; adding the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        '''simple docstring'''
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if u does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        '''simple docstring'''
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    def dfs(self, s=-2, d=-1):
        '''simple docstring'''
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        '''simple docstring'''
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        '''simple docstring'''
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        '''simple docstring'''
        return len(self.graph[u])
    def cycle_nodes(self):
        '''simple docstring'''
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        '''simple docstring'''
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        '''simple docstring'''
        return list(self.graph)

    def dfs_time(self, s=-2, d=-1):
        '''simple docstring'''
        begin = time()
        self.dfs(s, d)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        '''simple docstring'''
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
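# A minimal usage sketch: build a tiny directed graph and exercise traversal.
if __name__ == "__main__":
    g = DirectedGraph()
    g.add_pair(0, 1)
    g.add_pair(0, 2)
    g.add_pair(1, 3)
    print(g.all_nodes())  # [0, 1, 2, 3]
    print(g.bfs(0))       # breadth-first order from node 0, e.g. [0, 1, 2, 3]
    print(g.dfs(0, 3))    # a depth-first path from 0 to 3, e.g. [0, 1, 3]
    print(g.in_degree(3)) # 1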
| 90
|
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        '''simple docstring'''
        return DebertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )

    def get_pipeline_config(self):
        '''simple docstring'''
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        '''simple docstring'''
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @unittest.skip(reason='Model not available yet')
    def test_inference_masked_lm(self):
        '''simple docstring'''
        pass

    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = DebertaModel.from_pretrained('microsoft/deberta-base')

        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"""{output[:, 1:4, 1:4]}""")
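# A usage sketch: the integration test above boils down to this forward pass.
# Running it downloads microsoft/deberta-base, so it is left as a comment.
#
#   model = DebertaModel.from_pretrained("microsoft/deberta-base")
#   outputs = model(input_ids, attention_mask=attention_mask)
#   hidden_states = outputs[0]   # (batch, seq_len, hidden_size)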
| 90
| 1
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 16_00, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 16_00, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_model_parallel(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
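# A usage sketch: these tests only run when the gating environment variable is
# set, per the skipif marker above. The exact test path is an assumption.
#
#   TEST_SAGEMAKER=True python -m pytest -s -v ./tests/sagemaker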
| 361
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path, pytorch_dump_folder_path):
    """simple docstring"""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])

                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
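# A usage sketch; the script file name and both paths are placeholders.
#
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path ./prophetnet_old_checkpoint \
#       --pytorch_dump_folder_path ./prophetnet_converted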
| 30
| 0
|
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_doc_toc(doc_list):
    '''simple docstring'''
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")
    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    '''simple docstring'''
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    '''simple docstring'''
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
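# A usage sketch: check the table of contents, or rewrite it in place. The
# script path below is an assumption.
#
#   python utils/check_doc_toc.py                      # raise on unsorted entries
#   python utils/check_doc_toc.py --fix_and_overwrite  # sort and save the YAML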
| 84
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
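# A usage sketch of the BPE round trip the tests exercise. Loading
# microsoft/biogpt requires a network connection, so it is left as a comment.
#
#   tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
#   ids = tokenizer("sequence builders").input_ids   # the leading 2 is the CLS-like token
#   text = tokenizer.decode(ids, skip_special_tokens=True)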
| 84
| 1
|
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
_a = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ), dtype=_lowerCAmelCase )[0]
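# Note (added for clarity): every header field in an MNIST IDX file is a
# 32-bit big-endian unsigned integer, which is why the helper above forces
# the '>' byte order before reading 4 bytes at a time.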
@deprecated(_lowerCAmelCase, '''Please use tf.data to implement this functionality.''' )
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
print('''Extracting''', f.name )
with gzip.GzipFile(fileobj=_lowerCAmelCase ) as bytestream:
_a = _readaa(_lowerCAmelCase )
if magic != 20_51:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
_a = _readaa(_lowerCAmelCase )
_a = _readaa(_lowerCAmelCase )
_a = _readaa(_lowerCAmelCase )
_a = bytestream.read(rows * cols * num_images )
_a = numpy.frombuffer(_lowerCAmelCase, dtype=numpy.uinta )
_a = data.reshape(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, 1 )
return data
@deprecated(_lowerCAmelCase, '''Please use tf.one_hot on tensors.''' )
def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : Tuple ):
"""simple docstring"""
_a = labels_dense.shape[0]
_a = numpy.arange(_lowerCAmelCase ) * num_classes
_a = numpy.zeros((num_labels, num_classes) )
_a = 1
return labels_one_hot
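# A minimal sketch of the flat-index one-hot construction the function above
# is intended to perform (illustrative names, not identifiers from this file):
#
#   num_labels = labels_dense.shape[0]
#   index_offset = numpy.arange(num_labels) * num_classes
#   labels_one_hot = numpy.zeros((num_labels, num_classes))
#   labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1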
@deprecated(_lowerCAmelCase, '''Please use tf.data to implement this functionality.''' )
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[int]=False, _lowerCAmelCase : Tuple=10 ):
"""simple docstring"""
print('''Extracting''', f.name )
with gzip.GzipFile(fileobj=_lowerCAmelCase ) as bytestream:
_a = _readaa(_lowerCAmelCase )
if magic != 20_49:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
_a = _readaa(_lowerCAmelCase )
_a = bytestream.read(_lowerCAmelCase )
_a = numpy.frombuffer(_lowerCAmelCase, dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_lowerCAmelCase, _lowerCAmelCase )
return labels
class __lowerCamelCase :
'''simple docstring'''
@deprecated(
_a , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=dtypes.floataa , __UpperCAmelCase=True , __UpperCAmelCase=None , ) -> List[str]:
_a = random_seed.get_seed(_a )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
_a = dtypes.as_dtype(_a ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
_a = 10000
_a = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F'images.shape: {images.shape} labels.shape: {labels.shape}'
_a = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
_a = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
_a = images.astype(numpy.floataa )
_a = numpy.multiply(_a , 1.0 / 255.0 )
_a = images
_a = labels
_a = 0
_a = 0
@property
def _UpperCAmelCase ( self ) -> Dict:
return self._images
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
return self._labels
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
return self._num_examples
@property
def _UpperCAmelCase ( self ) -> List[Any]:
return self._epochs_completed
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ) -> Union[str, Any]:
if fake_data:
_a = [1] * 784
_a = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(_a )],
[fake_label for _ in range(_a )],
)
_a = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
_a = numpy.arange(self._num_examples )
numpy.random.shuffle(_a )
_a = self.images[perma]
_a = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
_a = self._num_examples - start
_a = self._images[start : self._num_examples]
_a = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
_a = numpy.arange(self._num_examples )
numpy.random.shuffle(_a )
_a = self.images[perm]
_a = self.labels[perm]
# Start next epoch
_a = 0
_a = batch_size - rest_num_examples
_a = self._index_in_epoch
_a = self._images[start:end]
_a = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
_a = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
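# Note (added for clarity): when a requested batch crosses the epoch boundary,
# the method above concatenates the tail of the finished epoch with the head
# of the freshly shuffled next epoch, so every returned batch holds exactly
# `batch_size` examples.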
@deprecated(_lowerCAmelCase, '''Please write your own downloading logic.''' )
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if not gfile.Exists(_lowerCAmelCase ):
gfile.MakeDirs(_lowerCAmelCase )
_a = os.path.join(_lowerCAmelCase, _lowerCAmelCase )
if not gfile.Exists(_lowerCAmelCase ):
urllib.request.urlretrieve(_lowerCAmelCase, _lowerCAmelCase ) # noqa: S310
with gfile.GFile(_lowerCAmelCase ) as f:
_a = f.size()
print('''Successfully downloaded''', _lowerCAmelCase, _lowerCAmelCase, '''bytes.''' )
return filepath
@deprecated(
_lowerCAmelCase, '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def A_ ( _lowerCAmelCase : Dict, _lowerCAmelCase : Union[str, Any]=False, _lowerCAmelCase : Union[str, Any]=False, _lowerCAmelCase : Optional[int]=dtypes.floataa, _lowerCAmelCase : Union[str, Any]=True, _lowerCAmelCase : Optional[int]=50_00, _lowerCAmelCase : List[Any]=None, _lowerCAmelCase : Union[str, Any]=DEFAULT_SOURCE_URL, ):
"""simple docstring"""
if fake_data:
def fake():
return _DataSet(
[], [], fake_data=_lowerCAmelCase, one_hot=_lowerCAmelCase, dtype=_lowerCAmelCase, seed=_lowerCAmelCase )
_a = fake()
_a = fake()
_a = fake()
return _Datasets(train=_lowerCAmelCase, validation=_lowerCAmelCase, test=_lowerCAmelCase )
if not source_url: # empty string check
_a = DEFAULT_SOURCE_URL
_a = "train-images-idx3-ubyte.gz"
_a = "train-labels-idx1-ubyte.gz"
_a = "t10k-images-idx3-ubyte.gz"
_a = "t10k-labels-idx1-ubyte.gz"
_a = _maybe_download(
_lowerCAmelCase, _lowerCAmelCase, source_url + train_images_file )
with gfile.Open(_lowerCAmelCase, '''rb''' ) as f:
_a = _extract_images(_lowerCAmelCase )
_a = _maybe_download(
_lowerCAmelCase, _lowerCAmelCase, source_url + train_labels_file )
with gfile.Open(_lowerCAmelCase, '''rb''' ) as f:
_a = _extract_labels(_lowerCAmelCase, one_hot=_lowerCAmelCase )
_a = _maybe_download(
_lowerCAmelCase, _lowerCAmelCase, source_url + test_images_file )
with gfile.Open(_lowerCAmelCase, '''rb''' ) as f:
_a = _extract_images(_lowerCAmelCase )
_a = _maybe_download(
_lowerCAmelCase, _lowerCAmelCase, source_url + test_labels_file )
with gfile.Open(_lowerCAmelCase, '''rb''' ) as f:
_a = _extract_labels(_lowerCAmelCase, one_hot=_lowerCAmelCase )
if not 0 <= validation_size <= len(_lowerCAmelCase ):
_a = (
"Validation size should be between 0 and "
f'{len(_lowerCAmelCase )}. Received: {validation_size}.'
)
raise ValueError(_lowerCAmelCase )
_a = train_images[:validation_size]
_a = train_labels[:validation_size]
_a = train_images[validation_size:]
_a = train_labels[validation_size:]
_a = {"dtype": dtype, "reshape": reshape, "seed": seed}
_a = _DataSet(_lowerCAmelCase, _lowerCAmelCase, **_lowerCAmelCase )
_a = _DataSet(_lowerCAmelCase, _lowerCAmelCase, **_lowerCAmelCase )
_a = _DataSet(_lowerCAmelCase, _lowerCAmelCase, **_lowerCAmelCase )
return _Datasets(train=_lowerCAmelCase, validation=_lowerCAmelCase, test=_lowerCAmelCase )
| 370
|
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = RobertaTokenizer
A_ : Any = RobertaTokenizerFast
A_ : Dict = True
A_ : Tuple = {'cls_token': '<s>'}
def _UpperCAmelCase ( self ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
_a = '''lower newer'''
_a = '''lower newer'''
return input_text, output_text
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a = '''lower newer'''
_a = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_a = tokenizer.tokenize(__UpperCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_a = tokens + [tokenizer.unk_token]
_a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__UpperCAmelCase ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__UpperCAmelCase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained('''roberta-base''' )
_a = tokenizer.encode('''sequence builders''' , add_special_tokens=__UpperCAmelCase )
_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__UpperCAmelCase )
_a = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
_a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_tokenizer()
_a = '''Encode this sequence.'''
_a = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
_a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
_a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
_a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
# Testing spaces after special tokens
_a = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase )} ) # mask token has a left space
_a = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
_a = '''Encode <mask> sequence'''
_a = '''Encode <mask>sequence'''
_a = tokenizer.encode(__UpperCAmelCase )
_a = encoded.index(__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
_a = tokenizer.encode(__UpperCAmelCase )
_a = encoded.index(__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
_a = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
_a = '''A, <mask> AllenNLP sentence.'''
_a = tokenizer_r.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
_a = tokenizer_p.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def _UpperCAmelCase ( self ) -> Any:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __UpperCAmelCase )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __UpperCAmelCase )
self.assertEqual(post_processor_state['''trim_offsets'''] , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
_a = F'{text_of_1_token} {text_of_1_token}'
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ) + 1, 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
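# Illustrative summary (not part of the original test): add_prefix_space=True
# injects a leading space before the first word, and trim_offsets=True
# excludes that whitespace from the returned character offsets; this is the
# (0, len) vs. (1, 1 + len) difference the assertions above exercise.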
| 153
| 0
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def lowerCAmelCase_ ( __A, __A ) -> int:
'''simple docstring'''
UpperCAmelCase__ = checkpoint
UpperCAmelCase__ = {}
UpperCAmelCase__ = vae_state_dict["encoder.conv_in.weight"]
UpperCAmelCase__ = vae_state_dict["encoder.conv_in.bias"]
UpperCAmelCase__ = vae_state_dict["encoder.conv_out.weight"]
UpperCAmelCase__ = vae_state_dict["encoder.conv_out.bias"]
UpperCAmelCase__ = vae_state_dict["encoder.norm_out.weight"]
UpperCAmelCase__ = vae_state_dict["encoder.norm_out.bias"]
UpperCAmelCase__ = vae_state_dict["decoder.conv_in.weight"]
UpperCAmelCase__ = vae_state_dict["decoder.conv_in.bias"]
UpperCAmelCase__ = vae_state_dict["decoder.conv_out.weight"]
UpperCAmelCase__ = vae_state_dict["decoder.conv_out.bias"]
UpperCAmelCase__ = vae_state_dict["decoder.norm_out.weight"]
UpperCAmelCase__ = vae_state_dict["decoder.norm_out.bias"]
UpperCAmelCase__ = vae_state_dict["quant_conv.weight"]
UpperCAmelCase__ = vae_state_dict["quant_conv.bias"]
UpperCAmelCase__ = vae_state_dict["post_quant_conv.weight"]
UpperCAmelCase__ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
UpperCAmelCase__ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
UpperCAmelCase__ = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(__A )
}
# Retrieves the keys for the decoder up blocks only
UpperCAmelCase__ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
UpperCAmelCase__ = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(__A )
}
for i in range(__A ):
UpperCAmelCase__ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
UpperCAmelCase__ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
UpperCAmelCase__ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
UpperCAmelCase__ = renew_vae_resnet_paths(__A )
UpperCAmelCase__ = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(__A, __A, __A, additional_replacements=[meta_path], config=__A )
UpperCAmelCase__ = [key for key in vae_state_dict if "encoder.mid.block" in key]
UpperCAmelCase__ = 2
for i in range(1, num_mid_res_blocks + 1 ):
UpperCAmelCase__ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
UpperCAmelCase__ = renew_vae_resnet_paths(__A )
UpperCAmelCase__ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(__A, __A, __A, additional_replacements=[meta_path], config=__A )
UpperCAmelCase__ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
UpperCAmelCase__ = renew_vae_attention_paths(__A )
UpperCAmelCase__ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__A, __A, __A, additional_replacements=[meta_path], config=__A )
conv_attn_to_linear(__A )
for i in range(__A ):
UpperCAmelCase__ = num_up_blocks - 1 - i
UpperCAmelCase__ = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
UpperCAmelCase__ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
UpperCAmelCase__ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
UpperCAmelCase__ = renew_vae_resnet_paths(__A )
UpperCAmelCase__ = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(__A, __A, __A, additional_replacements=[meta_path], config=__A )
UpperCAmelCase__ = [key for key in vae_state_dict if "decoder.mid.block" in key]
UpperCAmelCase__ = 2
for i in range(1, num_mid_res_blocks + 1 ):
UpperCAmelCase__ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
UpperCAmelCase__ = renew_vae_resnet_paths(__A )
UpperCAmelCase__ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(__A, __A, __A, additional_replacements=[meta_path], config=__A )
UpperCAmelCase__ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
UpperCAmelCase__ = renew_vae_attention_paths(__A )
UpperCAmelCase__ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__A, __A, __A, additional_replacements=[meta_path], config=__A )
conv_attn_to_linear(__A )
return new_checkpoint
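# Note (added for clarity): the conversion above is essentially a key-renaming
# pass. CompVis "down.{i}.block" / "mid.block_{i}" / "up.{i}.block" entries are
# mapped onto diffusers "down_blocks.{i}.resnets" / "mid_block.resnets.{i-1}" /
# "up_blocks.{i}.resnets", and conv_attn_to_linear reshapes the 1x1 attention
# convolutions into linear-layer weights.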
def lowerCAmelCase_ ( __A, __A, ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ = requests.get(
" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
UpperCAmelCase__ = io.BytesIO(r.content )
UpperCAmelCase__ = OmegaConf.load(__A )
UpperCAmelCase__ = 512
UpperCAmelCase__ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
UpperCAmelCase__ = {}
with safe_open(__A, framework="pt", device="cpu" ) as f:
for key in f.keys():
UpperCAmelCase__ = f.get_tensor(__A )
else:
UpperCAmelCase__ = torch.load(__A, map_location=__A )["state_dict"]
# Convert the VAE model.
UpperCAmelCase__ = create_vae_diffusers_config(__A, image_size=__A )
UpperCAmelCase__ = custom_convert_ldm_vae_checkpoint(__A, __A )
UpperCAmelCase__ = AutoencoderKL(**__A )
vae.load_state_dict(__A )
vae.save_pretrained(__A )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to save the converted VAE model.')
UpperCamelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 65
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class __snake_case :
"""simple docstring"""
_lowerCamelCase = 42
# setable values
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = None
@classmethod
def UpperCamelCase__( cls , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
return cls(common=__lowerCamelCase , init_noise_sigma=__lowerCamelCase , timesteps=__lowerCamelCase )
@dataclass
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
class __snake_case ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
_lowerCamelCase = 42
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return True
@register_to_config
def __init__( self , __lowerCamelCase = 1000 , __lowerCamelCase = 0.0_0_0_1 , __lowerCamelCase = 0.0_2 , __lowerCamelCase = "linear" , __lowerCamelCase = None , __lowerCamelCase = "fixed_small" , __lowerCamelCase = True , __lowerCamelCase = "epsilon" , __lowerCamelCase = jnp.floataa , ):
'''simple docstring'''
__A : Tuple = dtype
def UpperCamelCase__( self , __lowerCamelCase = None ):
'''simple docstring'''
if common is None:
__A : Tuple = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__A : Tuple = jnp.array(1.0 , dtype=self.dtype )
__A : Optional[int] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__lowerCamelCase , init_noise_sigma=__lowerCamelCase , timesteps=__lowerCamelCase , )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
return sample
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = () ):
'''simple docstring'''
__A : Optional[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__A : Optional[Any] = (jnp.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__lowerCamelCase , timesteps=__lowerCamelCase , )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None ):
'''simple docstring'''
__A : int = state.common.alphas_cumprod[t]
__A : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
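        # Posterior variance, Eq. (7) of Ho et al. 2020 (DDPM):
        #   variance = beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t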
__A : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__A : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__A : List[Any] = jnp.clip(__lowerCamelCase , a_min=1e-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__A : Optional[Any] = jnp.log(jnp.clip(__lowerCamelCase , a_min=1e-2_0 ) )
elif variance_type == "fixed_large":
__A : Tuple = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__A : Union[str, Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__A : Optional[Any] = variance
__A : Optional[Any] = state.common.betas[t]
__A : Any = (predicted_variance + 1) / 2
__A : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True , ):
'''simple docstring'''
__A : Optional[int] = timestep
if key is None:
__A : List[Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__A , __A : Tuple = jnp.split(__lowerCamelCase , sample.shape[1] , axis=1 )
else:
__A : List[str] = None
# 1. compute alphas, betas
__A : Dict = state.common.alphas_cumprod[t]
__A : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__A : Tuple = 1 - alpha_prod_t
__A : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__A : Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__A : Any = model_output
elif self.config.prediction_type == "v_prediction":
__A : str = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
''' for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__A : str = jnp.clip(__lowerCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__A : Optional[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__A : Union[str, Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
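        # Closed form of the two coefficients, Eq. (7) of Ho et al. 2020:
        #   mu_tilde_t(x_t, x_0) = (sqrt(alphabar_{t-1}) * beta_t / (1 - alphabar_t)) * x_0
        #                        + (sqrt(alpha_t) * (1 - alphabar_{t-1}) / (1 - alphabar_t)) * x_t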
__A : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__A : List[Any] = jax.random.split(__lowerCamelCase , num=1 )
__A : List[str] = jax.random.normal(__lowerCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__lowerCamelCase , __lowerCamelCase , predicted_variance=__lowerCamelCase ) ** 0.5) * noise
__A : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__A : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__lowerCamelCase , state=__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ):
'''simple docstring'''
return add_noise_common(state.common , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ):
'''simple docstring'''
return get_velocity_common(state.common , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
| 179
| 0
|
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
snake_case__ : List[str] = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
snake_case__ : int = direct_transformers_import(PATH_TO_TRANSFORMERS)
snake_case__ : List[str] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
snake_case__ : Optional[Any] = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : Dict , _snake_case : Dict ):
lowerCAmelCase : List[Any] = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
lowerCAmelCase : Optional[Any] = True
# Deal with multi-line cases
elif (
re.search(
rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , _snake_case , )
is not None
):
lowerCAmelCase : Optional[int] = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
lowerCAmelCase : Optional[Any] = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
lowerCAmelCase : List[str] = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
lowerCAmelCase : List[Any] = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
lowerCAmelCase : List[str] = True
if not attribute_used:
lowerCAmelCase : Optional[int] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
lowerCAmelCase : Any = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
lowerCAmelCase : Any = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
lowerCAmelCase : Union[str, Any] = True
elif attribute.endswith('''_token_id''' ):
lowerCAmelCase : Union[str, Any] = True
# configuration class specific cases
if not case_allowed:
lowerCAmelCase : Any = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
lowerCAmelCase : Optional[Any] = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def _snake_case ( _snake_case : List[str] ):
lowerCAmelCase : Tuple = dict(inspect.signature(config_class.__init__ ).parameters )
lowerCAmelCase : Optional[int] = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
lowerCAmelCase : Optional[Any] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
lowerCAmelCase : List[Any] = {}
if len(config_class.attribute_map ) > 0:
lowerCAmelCase : Dict = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
lowerCAmelCase : Any = inspect.getsourcefile(_snake_case )
lowerCAmelCase : Dict = os.path.dirname(_snake_case )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
lowerCAmelCase : int = [os.path.join(_snake_case , _snake_case ) for fn in os.listdir(_snake_case ) if fn.startswith('''modeling_''' )]
# Get the source code strings
lowerCAmelCase : Dict = []
for path in modeling_paths:
if os.path.isfile(_snake_case ):
with open(_snake_case ) as fp:
modeling_sources.append(fp.read() )
lowerCAmelCase : Optional[int] = []
for config_param, default_value in zip(_snake_case , _snake_case ):
# `attributes` here is all the variant names for `config_param`
lowerCAmelCase : Tuple = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(_snake_case , _snake_case , _snake_case , _snake_case ):
unused_attributes.append(attributes[0] )
return sorted(_snake_case )
def _snake_case ( ):
lowerCAmelCase : Tuple = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
lowerCAmelCase : Dict = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda _snake_case : inspect.isclass(_snake_case )
and issubclass(_snake_case , _snake_case )
and inspect.getmodule(_snake_case ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
lowerCAmelCase : str = check_config_attributes_being_used(_snake_case )
if len(_snake_case ) > 0:
lowerCAmelCase : Tuple = unused_attributes
if len(_snake_case ) > 0:
lowerCAmelCase : Tuple = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += f'''{name}: {attributes}\n'''
raise ValueError(_snake_case )
if __name__ == "__main__":
check_config_attributes()
| 314
|
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
snake_case__ : List[str] = logging.get_logger(__name__)
class snake_case_( a__ ):
__UpperCamelCase = CLIPConfig
__UpperCamelCase = ['''CLIPEncoderLayer''']
def __init__( self : List[Any] , UpperCamelCase_ : CLIPConfig ):
super().__init__(UpperCamelCase_ )
lowerCAmelCase : str = CLIPVisionModelWithProjection(config.vision_config )
lowerCAmelCase : Any = nn.Linear(config.vision_config.projection_dim , 1 )
lowerCAmelCase : Dict = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=0.5 , UpperCamelCase_ : List[str]=0.5 ):
lowerCAmelCase : List[Any] = self.vision_model(UpperCamelCase_ )[0]
lowerCAmelCase : Tuple = self.p_head(UpperCamelCase_ )
lowerCAmelCase : Any = nsfw_detected.flatten()
lowerCAmelCase : Dict = nsfw_detected > p_threshold
lowerCAmelCase : int = nsfw_detected.tolist()
if any(UpperCamelCase_ ):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, nsfw_detected_ in enumerate(UpperCamelCase_ ):
if nsfw_detected_:
lowerCAmelCase : List[Any] = np.zeros(images[idx].shape )
lowerCAmelCase : Union[str, Any] = self.w_head(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = watermark_detected.flatten()
lowerCAmelCase : Optional[int] = watermark_detected > w_threshold
lowerCAmelCase : Union[str, Any] = watermark_detected.tolist()
if any(UpperCamelCase_ ):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, watermark_detected_ in enumerate(UpperCamelCase_ ):
if watermark_detected_:
lowerCAmelCase : List[str] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
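# A minimal usage sketch (hypothetical checkpoint and inputs, not from this
# file; assumes this is the DeepFloyd-IF-style safety checker with separate
# NSFW (p) and watermark (w) heads):
#
#   checker = IFSafetyChecker.from_pretrained("DeepFloyd/IF-I-XL-v1.0", subfolder="safety_checker")
#   images, nsfw_detected, watermark_detected = checker(clip_input, images)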
| 314
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A ={
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A['image_processing_pix2struct'] = ['Pix2StructImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A['modeling_pix2struct'] = [
        'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Pix2StructPreTrainedModel',
        'Pix2StructForConditionalGeneration',
        'Pix2StructVisionModel',
        'Pix2StructTextModel',
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], A, module_spec=__spec__)
| 34
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : Any = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
snake_case_ : Any = hf_hub_download(
repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
snake_case_ : List[Any] = VideoClassificationPipeline(model=__magic_name__ , image_processor=__magic_name__ , top_k=2 )
snake_case_ : str = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
for example in examples:
snake_case_ : Union[str, Any] = video_classifier(__magic_name__ )
self.assertEqual(
__magic_name__ , [
{'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )},
{'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )},
] , )
@require_torch
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Any = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
snake_case_ : str = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
snake_case_ : int = pipeline(
'''video-classification''' , model=__magic_name__ , feature_extractor=__magic_name__ , frame_sampling_rate=4 )
snake_case_ : List[str] = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
snake_case_ : Union[str, Any] = video_classifier(__magic_name__ , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}] , )
snake_case_ : int = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
[{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
] , )
@require_tf
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
pass
| 279
| 0
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@slow
def a ( self ):
snake_case_ = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
snake_case_ = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(snake_case )
from datasets import load_dataset
snake_case_ = load_dataset('nielsr/rvlcdip-demo' )
snake_case_ = dataset['train'][0]['image'].convert('RGB' )
snake_case_ = image_processor(snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
snake_case_ = model(**snake_case )
snake_case_ = outputs.logits
snake_case_ = torch.Size((1, 16) )
self.assertEqual(logits.shape , snake_case )
snake_case_ = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=snake_case , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case , atol=1e-4 ) )
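# Note (added for clarity): RVL-CDIP has 16 document classes, hence the
# (1, 16) logits shape checked above.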
| 200
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase ( unittest.TestCase ):
def a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def a ( self ):
snake_case_ , snake_case_ = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_ , snake_case_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_ = controlnet_params
snake_case_ = 'bird'
snake_case_ = jax.device_count()
snake_case_ = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
snake_case_ = pipe.prepare_image_inputs([canny_image] * num_samples )
snake_case_ = jax.random.PRNGKey(0 )
snake_case_ = jax.random.split(snake_case , jax.device_count() )
snake_case_ = replicate(snake_case )
snake_case_ = shard(snake_case )
snake_case_ = shard(snake_case )
snake_case_ = pipe(
prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ = images[0, 253:256, 253:256, -1]
snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ = jnp.array(
[0.16_79_69, 0.11_66_99, 0.08_15_43, 0.15_42_97, 0.13_28_12, 0.10_88_87, 0.16_99_22, 0.16_99_22, 0.20_50_78] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def a ( self ):
snake_case_ , snake_case_ = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_ , snake_case_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_ = controlnet_params
snake_case_ = 'Chef in the kitchen'
snake_case_ = jax.device_count()
snake_case_ = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
snake_case_ = pipe.prepare_image_inputs([pose_image] * num_samples )
snake_case_ = jax.random.PRNGKey(0 )
snake_case_ = jax.random.split(snake_case , jax.device_count() )
snake_case_ = replicate(snake_case )
snake_case_ = shard(snake_case )
snake_case_ = shard(snake_case )
snake_case_ = pipe(
prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ = images[0, 253:256, 253:256, -1]
snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ = jnp.array(
[[0.27_14_84, 0.26_17_19, 0.27_53_91, 0.27_73_44, 0.27_92_97, 0.29_10_16, 0.29_49_22, 0.30_27_34, 0.30_27_34]] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
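# Note (added for clarity): replicate() copies the pipeline params onto every
# JAX device and shard() splits the batched prompt/image arrays across those
# devices, matching the jit=True (pmapped) pipeline calls above.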
| 200
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self :Dict ):
A = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
A = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
A = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
A = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A = model(__UpperCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __UpperCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __UpperCamelCase , atol=1e-3 ) )
@slow
def lowerCamelCase ( self :Tuple ):
A = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
A = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
A = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
A = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A = model(__UpperCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __UpperCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __UpperCamelCase , atol=1e-3 ) )
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
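# Hypothetical extra check (not in the original): a triangle is an odd cycle,
# so it cannot be two-colored and the function should report False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # expected: False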
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)
    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
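# Example invocation (illustrative; the script name and checkpoint path are
# hypothetical, the flags match the parser defined above):
#   python convert_mobilenet_v1.py --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_pt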
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table):
        """simple docstring"""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """simple docstring"""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""")
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"""{file_idx}_{batch_idx}""", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                    raise
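# Usage sketch (assumption: this module is the packaged `parquet` builder that
# `load_dataset` dispatches to; the file path below is hypothetical):
def _example_load_parquet():
    from datasets import load_dataset

    return load_dataset("parquet", data_files={"train": "data/train.parquet"})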
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """simple docstring"""
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('''/''')
    target_model_path = args.target_model_path
    print(f"""Load fine-pruned model from {model_name_or_path}""")
    model = torch.load(os.path.join(model_name_or_path, '''pytorch_model.bin'''))
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""")
            else:
                raise ValueError('''Unknown pruning method''')
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"""bertarized_{os.path.basename(model_name_or_path)}""")
    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"""\nCreated folder {target_model_path}""")
    torch.save(pruned_model, os.path.join(target_model_path, '''pytorch_model.bin'''))
    print('''\nPruned model saved! See you later!''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()
    main(args)
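# Example invocation (illustrative; the script name, model path and threshold
# value are hypothetical, the flags match the parser defined above):
#   python bertarize.py --pruning_method topK --threshold 0.1 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model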
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase_ :
lowercase = None
lowercase = False
lowercase = False
lowercase = False
lowercase = None
lowercase = None
lowercase = False
lowercase = False
lowercase = False
lowercase = True
lowercase = None
lowercase = 1
lowercase = None
lowercase = False
lowercase = None
lowercase = None
def _lowercase( self ) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a : Optional[int] = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
a : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowerCamelCase : str = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = field(default='''toto''' , metadata={'''help''': '''help message'''} )
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = False
UpperCamelCase = True
UpperCamelCase = None
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''titi'''
UpperCamelCase = '''toto'''
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''titi'''
UpperCamelCase = '''toto'''
UpperCamelCase = 42
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = '''toto'''
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = BasicEnum(self.foo )
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = '''toto'''
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = MixedTypeEnum(self.foo )
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = None
UpperCamelCase = field(default=UpperCamelCase , metadata={'''help''': '''help message'''} )
UpperCamelCase = None
UpperCamelCase = list_field(default=[] )
UpperCamelCase = list_field(default=[] )
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = list_field(default=[] )
UpperCamelCase = list_field(default=[1, 2, 3] )
UpperCamelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
UpperCamelCase = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = field()
UpperCamelCase = field()
UpperCamelCase = field()
def a__ ( self : int ) -> str:
"""simple docstring"""
lowerCamelCase_ = BasicEnum(self.required_enum )
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = field()
UpperCamelCase = None
UpperCamelCase = field(default='''toto''' , metadata={'''help''': '''help message'''} )
UpperCamelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
if is_python_no_less_than_3_10:
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = False
UpperCamelCase = True
UpperCamelCase = None
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = None
UpperCamelCase = field(default=UpperCamelCase , metadata={'''help''': '''help message'''} )
UpperCamelCase = None
UpperCamelCase = list_field(default=[] )
UpperCamelCase = list_field(default=[] )
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : int , A_ : argparse.ArgumentParser , A_ : argparse.ArgumentParser ) -> Dict:
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowerCamelCase_ = {k: v for k, v in vars(__UpperCamelCase ).items() if k != 'container'}
lowerCamelCase_ = {k: v for k, v in vars(__UpperCamelCase ).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , __UpperCamelCase ) and yy.get('choices' , __UpperCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](__UpperCamelCase ) , yy['type'](__UpperCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def a__ ( self : int ) -> str:
"""simple docstring"""
lowerCamelCase_ = HfArgumentParser(__UpperCamelCase )
lowerCamelCase_ = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__UpperCamelCase , required=__UpperCamelCase )
expected.add_argument('--bar' , type=__UpperCamelCase , required=__UpperCamelCase )
expected.add_argument('--baz' , type=__UpperCamelCase , required=__UpperCamelCase )
expected.add_argument('--flag' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='?' )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((lowerCamelCase_ ) , ) = parser.parse_args_into_dataclasses(__UpperCamelCase , look_for_args_file=__UpperCamelCase )
self.assertFalse(example.flag )
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = HfArgumentParser(__UpperCamelCase )
lowerCamelCase_ = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=__UpperCamelCase )
expected.add_argument('--baz' , default='toto' , type=__UpperCamelCase , help='help message' )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='?' )
expected.add_argument('--baz' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=__UpperCamelCase , dest='baz' )
expected.add_argument('--opt' , type=__UpperCamelCase , default=__UpperCamelCase )
lowerCamelCase_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__UpperCamelCase )
for dataclass_type in dataclass_types:
lowerCamelCase_ = HfArgumentParser(__UpperCamelCase )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = parser.parse_args([] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) )
lowerCamelCase_ = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) )
lowerCamelCase_ = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) )
lowerCamelCase_ = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) )
lowerCamelCase_ = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) )
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ = HfArgumentParser(__UpperCamelCase )
lowerCamelCase_ = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
lowerCamelCase_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCamelCase_ = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
lowerCamelCase_ = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCamelCase_ = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
lowerCamelCase_ = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = '''toto'''
lowerCamelCase_ = HfArgumentParser(__UpperCamelCase )
lowerCamelCase_ = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
lowerCamelCase_ = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
lowerCamelCase_ = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
def a__ ( self : int ) -> int:
"""simple docstring"""
lowerCamelCase_ = HfArgumentParser(__UpperCamelCase )
lowerCamelCase_ = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=__UpperCamelCase )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=__UpperCamelCase )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__UpperCamelCase )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=__UpperCamelCase )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = parser.parse_args([] )
self.assertEqual(
__UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCamelCase_ = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(__UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = argparse.ArgumentParser()
expected.add_argument('--foo' , default=__UpperCamelCase , type=__UpperCamelCase )
expected.add_argument('--bar' , default=__UpperCamelCase , type=__UpperCamelCase , help='help message' )
expected.add_argument('--baz' , default=__UpperCamelCase , type=__UpperCamelCase )
expected.add_argument('--ces' , nargs='+' , default=[] , type=__UpperCamelCase )
expected.add_argument('--des' , nargs='+' , default=[] , type=__UpperCamelCase )
lowerCamelCase_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__UpperCamelCase )
for dataclass_type in dataclass_types:
lowerCamelCase_ = HfArgumentParser(__UpperCamelCase )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = parser.parse_args([] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , bar=__UpperCamelCase , baz=__UpperCamelCase , ces=[] , des=[] ) )
lowerCamelCase_ = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(__UpperCamelCase , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def a__ ( self : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ = HfArgumentParser(__UpperCamelCase )
lowerCamelCase_ = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=__UpperCamelCase , required=__UpperCamelCase )
expected.add_argument('--required_str' , type=__UpperCamelCase , required=__UpperCamelCase )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__UpperCamelCase , )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
def a__ ( self : str ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = HfArgumentParser(__UpperCamelCase )
lowerCamelCase_ = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__UpperCamelCase , required=__UpperCamelCase )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__UpperCamelCase , )
expected.add_argument('--opt' , type=__UpperCamelCase , default=__UpperCamelCase )
expected.add_argument('--baz' , default='toto' , type=__UpperCamelCase , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__UpperCamelCase )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = HfArgumentParser(__UpperCamelCase )
lowerCamelCase_ = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
lowerCamelCase_ = parser.parse_dict(__UpperCamelCase )[0]
lowerCamelCase_ = BasicExample(**__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = HfArgumentParser(__UpperCamelCase )
lowerCamelCase_ = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(__UpperCamelCase , parser.parse_dict , __UpperCamelCase , allow_extra_keys=__UpperCamelCase )
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = HfArgumentParser(__UpperCamelCase )
lowerCamelCase_ = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ = os.path.join(__UpperCamelCase , 'temp_json' )
os.mkdir(__UpperCamelCase )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
            lowerCamelCase_ = parser.parse_json_file(Path(temp_local_path + '.json' ) )[0]
lowerCamelCase_ = BasicExample(**__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = HfArgumentParser(__UpperCamelCase )
lowerCamelCase_ = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ = os.path.join(__UpperCamelCase , 'temp_yaml' )
os.mkdir(__UpperCamelCase )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
lowerCamelCase_ = BasicExample(**__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = HfArgumentParser(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
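# Minimal usage sketch (separate from the tests above; names are illustrative):
# parsing CLI-style arguments straight into a dataclass, the pattern every test
# in this file exercises.
def _example_hf_argument_parser():
    @dataclass
    class ExampleArguments:
        foo: int = 1
        bar: str = "baz"

    parser = HfArgumentParser(ExampleArguments)
    (example_args,) = parser.parse_args_into_dataclasses(["--foo", "2", "--bar", "qux"])
    return example_args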
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = (DPMSolverSinglestepScheduler,)
UpperCamelCase__ = (("""num_inference_steps""", 25),)
def lowercase__ ( self : Tuple , **__UpperCamelCase : Tuple )->Any:
_UpperCAmelCase = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf''' ),
'''variance_type''': None,
}
config.update(**__UpperCamelCase )
return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(output, t, sample, **kwargs).prev_sample
                new_output = new_scheduler.step(new_output, t, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowercase__ ( self : Any )->Union[str, Any]:
pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
def lowercase__ ( self : List[Any] )->Dict:
_UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = 5_0
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3
def lowercase__ ( self : Dict )->Dict:
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def lowercase__ ( self : str )->Optional[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
_UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
def lowercase__ ( self : Union[str, Any] )->int:
self.check_over_configs(thresholding=__UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , algorithm_type='''dpmsolver++''' , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , )
def lowercase__ ( self : str )->str:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def lowercase__ ( self : List[Any] )->Tuple:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , )
_UpperCAmelCase = self.full_loop(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , )
assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers"
def lowercase__ ( self : Dict )->List[str]:
self.check_over_configs(lower_order_final=__UpperCamelCase )
self.check_over_configs(lower_order_final=__UpperCamelCase )
def lowercase__ ( self : Dict )->str:
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def lowercase__ ( self : List[str] )->int:
self.check_over_configs(variance_type=__UpperCamelCase )
self.check_over_configs(variance_type='''learned_range''' )
def lowercase__ ( self : List[str] )->Union[str, Any]:
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 )
def lowercase__ ( self : List[Any] )->int:
_UpperCAmelCase = self.full_loop()
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
def lowercase__ ( self : List[str] )->List[str]:
_UpperCAmelCase = self.full_loop(use_karras_sigmas=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3
def lowercase__ ( self : int )->List[Any]:
_UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3
def lowercase__ ( self : Optional[Any] )->Dict:
_UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3
def lowercase__ ( self : Union[str, Any] )->List[str]:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 1_0
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
        assert sample.dtype == torch.float16
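# Illustrative sketch (not part of the tests): the same `from_config` round-trip
# the tests rely on is how this scheduler is swapped into a pipeline; the model
# id is an example.
def _example_swap_in_scheduler():
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)
    return pipe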
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
SCREAMING_SNAKE_CASE :int = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ], axis=-1, )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase_ = self._prepare_for_class(_lowercase , _lowercase )
UpperCamelCase_ = model_class(_lowercase )
@jax.jit
def encode_jitted(_lowercase , _lowercase=None , **_lowercase ):
return model.encode(input_ids=_lowercase , attention_mask=_lowercase )
with self.subTest("JIT Enabled" ):
UpperCamelCase_ = encode_jitted(**_lowercase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCamelCase_ = encode_jitted(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for jitted_output, output in zip(_lowercase , _lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase_ = model_class(_lowercase )
UpperCamelCase_ = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
UpperCamelCase_ = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(_lowercase , _lowercase , _lowercase ):
return model.decode(
decoder_input_ids=_lowercase , decoder_attention_mask=_lowercase , encoder_outputs=_lowercase , )
with self.subTest("JIT Enabled" ):
UpperCamelCase_ = decode_jitted(**_lowercase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCamelCase_ = decode_jitted(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for jitted_output, output in zip(_lowercase , _lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase_ ( self )-> int:
for model_class_name in self.all_model_classes:
UpperCamelCase_ = model_class_name.from_pretrained("google/pegasus-large" , from_pt=_lowercase )
UpperCamelCase_ = np.ones((1, 1) )
UpperCamelCase_ = model(_lowercase )
self.assertIsNotNone(_lowercase )
@slow
def UpperCAmelCase_ ( self )-> str:
UpperCamelCase_ = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
UpperCamelCase_ = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
UpperCamelCase_ = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
UpperCamelCase_ = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
UpperCamelCase_ = tokenizer(_lowercase , return_tensors="np" , truncation=_lowercase , max_length=512 , padding=_lowercase )
UpperCamelCase_ = model.generate(**_lowercase , num_beams=2 ).sequences
UpperCamelCase_ = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
assert tgt_text == decoded
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    '''simple docstring'''
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path):
    '''simple docstring'''
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path):
    '''simple docstring'''
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    '''simple docstring'''
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path):
    '''simple docstring'''
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path):
    '''simple docstring'''
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path):
    '''simple docstring'''
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
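# Usage sketch (the sqlite path below is hypothetical): the reader exercised
# above is also reachable through `Dataset.from_sql`.
def _example_dataset_from_sql():
    return Dataset.from_sql("dataset", "sqlite:///path/to/file.sqlite")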
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def lowerCAmelCase_ ( __lowerCamelCase ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__snake_case : Tuple = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
__snake_case : Tuple = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
__snake_case : Optional[Any] = name.replace("patch_embed" , "" )
if "pos_embed" in name:
__snake_case : Optional[int] = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
__snake_case : List[str] = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
__snake_case : Union[str, Any] = name.replace("proj" , "projection" )
if "blocks" in name:
__snake_case : int = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
__snake_case : Tuple = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__snake_case : Any = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name and "backbone" not in name:
__snake_case : Optional[Any] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name and "backbone" not in name:
__snake_case : Any = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
__snake_case : Dict = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
__snake_case : Union[str, Any] = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
__snake_case : List[Any] = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
__snake_case : str = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
__snake_case : List[str] = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
__snake_case : Optional[int] = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
__snake_case : Optional[int] = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__snake_case : int = name.replace(F'refinenet{layer_idx}' , F'fusion_stage.layers.{abs(layer_idx-4 )}' )
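        # for example, "neck.refinenet4.out_conv" becomes "neck.fusion_stage.layers.0.out_conv"
        # since abs(4 - 4) == 0, while refinenet1 lands on fusion_stage.layers.3 (abs(1 - 4) == 3)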
if "out_conv" in name:
__snake_case : Any = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
__snake_case : List[Any] = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
__snake_case : Tuple = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
__snake_case : List[str] = name.replace("conv1" , "convolution1" )
if "conv2" in name:
__snake_case : str = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__snake_case : Union[str, Any] = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
__snake_case : Optional[int] = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
__snake_case : List[str] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
__snake_case : Dict = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__snake_case : Tuple = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
__snake_case : int = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
__snake_case : Union[str, Any] = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
__snake_case : Optional[Any] = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
__snake_case : Optional[int] = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
__snake_case : Dict = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
__snake_case : Union[str, Any] = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
__snake_case : Union[str, Any] = name.replace("pretrained" , "dpt" )
if "bn" in name:
__snake_case : Tuple = name.replace("bn" , "batch_norm" )
if "head" in name:
__snake_case : Dict = name.replace("head" , "head.head" )
if "encoder.norm" in name:
__snake_case : Optional[int] = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
__snake_case : Tuple = name.replace("auxlayer" , "auxiliary_head.head" )
if "backbone" in name:
__snake_case : str = name.replace("backbone" , "backbone.bit.encoder" )
if ".." in name:
__snake_case : Tuple = name.replace(".." , "." )
if "stem.conv" in name:
__snake_case : int = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
__snake_case : Any = name.replace("blocks" , "layers" )
if "convolution" in name and "backbone" in name:
__snake_case : Optional[int] = name.replace("convolution" , "conv" )
if "layer" in name and "backbone" in name:
__snake_case : List[Any] = name.replace("layer" , "layers" )
if "backbone.bit.encoder.bit" in name:
__snake_case : Optional[int] = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
if "embedder.conv" in name:
__snake_case : int = name.replace("embedder.conv" , "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
__snake_case : Optional[Any] = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
return name
def read_in_q_k_v ( state_dict , config ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__snake_case : int = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.weight' )
__snake_case : Any = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__snake_case : str = in_proj_weight[: config.hidden_size, :]
__snake_case : List[Any] = in_proj_bias[: config.hidden_size]
__snake_case : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__snake_case : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__snake_case : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
__snake_case : int = in_proj_bias[-config.hidden_size :]
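# a minimal shape sketch of the split above: with hidden_size == 1024 the fused qkv
# weight has shape (3072, 1024); rows 0-1023 become the query projection, rows
# 1024-2047 the key projection and rows 2048-3071 the value projection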
def prepare_img ( ):
__snake_case : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
__snake_case : int = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def convert_dpt_checkpoint ( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ):
__snake_case , __snake_case : Optional[int] = get_dpt_config(__lowerCamelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
__snake_case : Optional[int] = torch.load(__lowerCamelCase , map_location="cpu" )
# remove certain keys
remove_ignore_keys_(__lowerCamelCase )
# rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
# read in qkv matrices
    read_in_q_k_v(state_dict , config )
# load HuggingFace model
__snake_case : Dict = DPTForSemanticSegmentation(__lowerCamelCase ) if "ade" in checkpoint_url else DPTForDepthEstimation(__lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
model.eval()
# Check outputs on an image
__snake_case : str = 4_8_0 if "ade" in checkpoint_url else 3_8_4
__snake_case : Any = DPTImageProcessor(size=__lowerCamelCase )
__snake_case : int = prepare_img()
__snake_case : Union[str, Any] = image_processor(__lowerCamelCase , return_tensors="pt" )
# forward pass
__snake_case : Dict = model(**__lowerCamelCase ).logits if "ade" in checkpoint_url else model(**__lowerCamelCase ).predicted_depth
if show_prediction:
__snake_case : int = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=__lowerCamelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 2_5_5 ).show()
if pytorch_dump_folder_path is not None:
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 123
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"camembert-base": 512,
}
SPIECE_UNDERLINE = "▁"
class CamembertTokenizer ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self : Dict , vocab_file : str , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids )
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
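        # the four fairseq control tokens occupy ids 0-3, so raw sentencepiece ids are
        # shifted by self.fairseq_offset (== 4) in the id conversion methods below, and
        # the id right after the shifted sentencepiece vocabulary is reserved for "<mask>"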
    def build_inputs_with_special_tokens( self : Any , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def get_special_tokens_mask( self : Any , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self : Tuple , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    @property
    def vocab_size( self : Any ) -> int:
        return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
    def get_vocab( self : Dict ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self : Tuple , text : str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self : str , token : str ) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token ) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token )
    def _convert_id_to_token( self : List[Any] , index : int ) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self : List[Any] , tokens : List[str] ) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
    def __getstate__( self : List[Any] ) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
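        # SentencePieceProcessor instances are not picklable, so the processor is dropped
        # from the pickled state here and rebuilt from the vocab file in __setstate__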
return state
    def __setstate__( self : Optional[int] , d : Optional[int] ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self : Optional[Any] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 310
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint ( checkpoint_path ,metadata_path ,entity_vocab_path ,pytorch_dump_folder_path ,model_size ) -> Optional[Any]:
"""simple docstring"""
with open(lowercase_ ) as metadata_file:
_UpperCamelCase : Dict = json.load(lowercase_ )
_UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
_UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"]
# Load the entity vocab file
_UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ )
# add an entry for [MASK2]
_UpperCamelCase : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
_UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f:
_UpperCamelCase : Tuple = json.load(lowercase_ )
_UpperCamelCase : Optional[int] = "MLukeTokenizer"
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
_UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0]
_UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
_UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
_UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
_UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCamelCase : Optional[Any] = state_dict[bias_name]
_UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase : List[Any] = state_dict[prefix + matrix_name]
_UpperCamelCase : str = state_dict[prefix + matrix_name]
_UpperCamelCase : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
_UpperCamelCase : Tuple = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCamelCase : int = state_dict["entity_predictions.bias"]
_UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
    model = LukeForMaskedLM(config=config ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
            new_state_dict[F'''luke.{key}'''] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict ,strict=False )
    if set(unexpected_keys ) != {"luke.embeddings.position_ids"}:
        raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
    if set(missing_keys ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" )
_UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_UpperCamelCase : Optional[Any] = (0, 9)
_UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : List[str] = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 33, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 1, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ )
_UpperCamelCase : int = "Tokyo is the capital of <mask>."
_UpperCamelCase : List[Any] = (24, 30)
_UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : Optional[Any] = model(**lowercase_ )
_UpperCamelCase : int = encoding["input_ids"][0].tolist()
_UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
_UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase_ )
_UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item()
_UpperCamelCase : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def load_original_entity_vocab ( entity_vocab_path ) -> Tuple:
    """simple docstring"""
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[F'''{language}:{entity_name}'''] = entity_id
    return new_mapping
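# the mapping built above keys every language variant as "<language>:<entity_name>"
# (special tokens such as [MASK] keep their bare names), each pointing at the original
# entity id from the source vocabulary file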
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 310
| 1
|
class Node :
    """simple docstring"""
    def __init__( self , val ) -> None:
        '''simple docstring'''
        self.val = val
        self.left = None
        self.right = None
    def insert( self , val ) -> None:
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
            else:
                # an equal value is neither smaller nor larger, so duplicates are dropped
                self.val = val
def inorder( root , res ):
    """simple docstring"""
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def tree_sort( arr ):
    """simple docstring"""
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res
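# note: tree sort averages O(n log n) comparisons, but a pre-sorted input degrades the
# unbalanced BST into a linked list and the worst case becomes O(n^2)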
if __name__ == "__main__":
print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
| 210
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
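# the _LazyModule pattern below defers the heavy torch/tokenizers imports until an
# attribute is first accessed, while the TYPE_CHECKING branch keeps static analyzers
# and IDEs aware of the real symbols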
_import_structure = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_bloom_fast"""] = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_bloom"""] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 210
| 1
|
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32 ( bytestream ):
    """simple docstring"""
    dt = numpy.dtype(numpy.uint32 ).newbyteorder('>' )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=dt )[0]
@deprecated(None , 'Please use tf.data to implement this functionality.' )
def _extract_images ( f ):
    """simple docstring"""
    print('Extracting' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2_051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
        num_images = _read32(bytestream )
        rows = _read32(bytestream )
        cols = _read32(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
    return data
@deprecated(None , 'Please use tf.one_hot on tensors.' )
def _dense_to_one_hot ( labels_dense , num_classes ):
    """simple docstring"""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
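# a quick worked example of _dense_to_one_hot: for labels_dense == [1, 0, 2] and
# num_classes == 3, index_offset == [0, 3, 6], so flat positions [1, 3, 8] are set
# to 1, producing the one-hot rows [0, 1, 0], [1, 0, 0] and [0, 0, 1]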
@deprecated(None , 'Please use tf.data to implement this functionality.' )
def _extract_labels ( f , one_hot=False , num_classes=10 ):
    """simple docstring"""
    print('Extracting' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2_049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
        num_items = _read32(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels , num_classes )
    return labels
class _DataSet :
"""simple docstring"""
    @deprecated(
        None , 'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.' , )
    def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ):
        """simple docstring"""
        seeda , seedb = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seedb )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
        if fake_data:
            self._num_examples = 1_0_0_0_0
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"""images.shape: {images.shape} labels.shape: {labels.shape}"""
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 2_5_5.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images ( self ):
        """simple docstring"""
        return self._images
    @property
    def labels ( self ):
        """simple docstring"""
        return self._labels
    @property
    def num_examples ( self ):
        """simple docstring"""
        return self._num_examples
    @property
    def epochs_completed ( self ):
        """simple docstring"""
        return self._epochs_completed
    def next_batch ( self , batch_size , fake_data=False , shuffle=True ):
        """simple docstring"""
        if fake_data:
            fake_image = [1] * 7_8_4
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size )],
                [fake_label for _ in range(batch_size )],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perma = numpy.arange(self._num_examples )
            numpy.random.shuffle(perma )
            self._images = self.images[perma]
            self._labels = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples )
                numpy.random.shuffle(perm )
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None , 'Please write your own downloading logic.' )
def _maybe_download ( filename , work_directory , source_url ):
    """simple docstring"""
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory , filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url , filepath )  # noqa: S310
        with gfile.GFile(filepath ) as f:
            size = f.size()
        print('Successfully downloaded' , filename , size , 'bytes.' )
    return filepath
@deprecated(
    None , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def read_data_sets ( train_dir , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , validation_size=5_000 , seed=None , source_url=DEFAULT_SOURCE_URL , ):
    """simple docstring"""
    if fake_data:
        def fake():
            return _DataSet(
                [] , [] , fake_data=True , one_hot=one_hot , dtype=dtype , seed=seed )
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train , validation=validation , test=test )
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = 'train-images-idx3-ubyte.gz'
    train_labels_file = 'train-labels-idx1-ubyte.gz'
    test_images_file = 't10k-images-idx3-ubyte.gz'
    test_labels_file = 't10k-labels-idx1-ubyte.gz'
    local_file = _maybe_download(
        train_images_file , train_dir , source_url + train_images_file )
    with gfile.Open(local_file , 'rb' ) as f:
        train_images = _extract_images(f )
    local_file = _maybe_download(
        train_labels_file , train_dir , source_url + train_labels_file )
    with gfile.Open(local_file , 'rb' ) as f:
        train_labels = _extract_labels(f , one_hot=one_hot )
    local_file = _maybe_download(
        test_images_file , train_dir , source_url + test_images_file )
    with gfile.Open(local_file , 'rb' ) as f:
        test_images = _extract_images(f )
    local_file = _maybe_download(
        test_labels_file , train_dir , source_url + test_labels_file )
    with gfile.Open(local_file , 'rb' ) as f:
        test_labels = _extract_labels(f , one_hot=one_hot )
    if not 0 <= validation_size <= len(train_images ):
        msg = (
            'Validation size should be between 0 and '
            f"""{len(train_images )}. Received: {validation_size}."""
        )
        raise ValueError(msg )
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
    train = _DataSet(train_images , train_labels , **options )
    validation = _DataSet(validation_images , validation_labels , **options )
    test = _DataSet(test_images , test_labels , **options )
    return _Datasets(train=train , validation=validation , test=test )
| 351
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester :
"""simple docstring"""
_SCREAMING_SNAKE_CASE = XGLMConfig
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = """gelu"""
    def __init__( self , parent , batch_size=1_4 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=9_9 , d_model=3_2 , num_hidden_layers=2 , num_attention_heads=4 , ffn_dim=3_7 , activation_function="gelu" , activation_dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.0_2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
def A ( self : Union[str, Any] ):
"""simple docstring"""
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config ( self ):
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=UpperCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=UpperCamelCase__ , )
    def prepare_config_and_inputs_for_common ( self ):
        """simple docstring"""
        config, input_ids, input_mask, head_mask = self.prepare_config_and_inputs()
        inputs_dict = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class TFXGLMModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_SCREAMING_SNAKE_CASE = (TFXGLMForCausalLM,) if is_tf_available() else ()
_SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = TFXGLMModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , n_embd=3_7 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def A ( self : List[str] ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = TFXGLMModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def A ( self : Dict ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def A ( self : Optional[int] , UpperCamelCase__ : Tuple=True ):
"""simple docstring"""
UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
UpperCamelCase = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCamelCase = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
# fmt: on
UpperCamelCase = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCamelCase__ )
@slow
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
UpperCamelCase = tokenizer('Today is a nice day and' , return_tensors='tf' )
UpperCamelCase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
UpperCamelCase = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__ , seed=[7, 0] )
UpperCamelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCamelCase__ )
UpperCamelCase = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
UpperCamelCase = 'left'
# use different length sentences to test batching
UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
UpperCamelCase = tokenizer(UpperCamelCase__ , return_tensors='tf' , padding=UpperCamelCase__ )
UpperCamelCase = inputs['input_ids']
UpperCamelCase = model.generate(input_ids=UpperCamelCase__ , attention_mask=inputs['attention_mask'] , max_new_tokens=1_2 )
UpperCamelCase = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
UpperCamelCase = model.generate(input_ids=UpperCamelCase__ , max_new_tokens=1_2 )
UpperCamelCase = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
UpperCamelCase = model.generate(input_ids=UpperCamelCase__ , max_new_tokens=1_2 )
UpperCamelCase = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
UpperCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCamelCase__ )
UpperCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCamelCase__ )
UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , [non_padded_sentence, padded_sentence] )
| 249
| 0
|
'''simple docstring'''
from PIL import Image
def change_brightness ( img: Image , level: float ) -> Image:
    """simple docstring"""
    def brightness(c: int ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
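# note: Image.point evaluates brightness() once per possible 8-bit value to build a
# lookup table; results outside the valid pixel range are clamped by PIL, so extreme
# levels simply saturate toward black or white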
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
| 28
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowerCamelCase : Tuple = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict ( config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotSmallModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=32 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common ( self ):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward ( self , model_class_name , config , inputs_dict ):
"""simple docstring"""
A_ : str = 20
A_ : Any = model_class_name(_lowerCamelCase )
A_ : List[Any] = model.encode(inputs_dict['''input_ids'''] )
A_ , A_ : int = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
A_ : int = model.init_cache(decoder_input_ids.shape[0] , _lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
A_ : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
A_ : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , _lowerCamelCase , decoder_attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , decoder_position_ids=_lowerCamelCase , )
A_ : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
A_ : Tuple = model.decode(
decoder_input_ids[:, -1:] , _lowerCamelCase , decoder_attention_mask=_lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCamelCase , )
A_ : str = model.decode(_lowerCamelCase , _lowerCamelCase )
A_ : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
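    # check_use_cache_forward above decodes in two chunks through init_cache/past_key_values
    # and asserts the final-step logits match a single uncached decode to within 1e-3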
    def check_use_cache_forward_with_attn_mask ( self , model_class_name , config , inputs_dict ):
"""simple docstring"""
A_ : Union[str, Any] = 20
A_ : Dict = model_class_name(_lowerCamelCase )
A_ : Dict = model.encode(inputs_dict['''input_ids'''] )
A_ , A_ : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
A_ : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
A_ : Dict = model.init_cache(decoder_input_ids.shape[0] , _lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
A_ : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , _lowerCamelCase , decoder_attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , decoder_position_ids=_lowerCamelCase , )
A_ : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
A_ : List[str] = model.decode(
decoder_input_ids[:, -1:] , _lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCamelCase , decoder_position_ids=_lowerCamelCase , )
A_ : Tuple = model.decode(_lowerCamelCase , _lowerCamelCase , decoder_attention_mask=_lowerCamelCase )
A_ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowercase ( unittest.TestCase):
    vocab_size = 99
def a_ ( self : str ):
"""simple docstring"""
A_ : List[str] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
A_ : List[str] = input_ids.shape[0]
A_ : Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def a_ ( self : List[str] ):
"""simple docstring"""
A_ , A_ , A_ : List[Any] = self._get_config_and_data()
A_ : Dict = FlaxBlenderbotSmallForConditionalGeneration(_lowerCamelCase )
A_ : Optional[int] = lm_model(input_ids=_lowerCamelCase )
A_ : Optional[Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _lowerCamelCase )
def a_ ( self : str ):
"""simple docstring"""
A_ : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
A_ : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCamelCase )
A_ : List[str] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
A_ : Optional[int] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
A_ : Dict = lm_model(input_ids=_lowerCamelCase , decoder_input_ids=_lowerCamelCase )
A_ : Any = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _lowerCamelCase )
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
A_ : int = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
A_ : Tuple = shift_tokens_right(_lowerCamelCase , 1 , 2 )
A_ : Optional[int] = np.equal(_lowerCamelCase , 1 ).astype(np.floataa ).sum()
A_ : Tuple = np.equal(_lowerCamelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCamelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotSmallModelTest ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
__lowerCAmelCase : Any = True
__lowerCAmelCase : List[Any] = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
__lowerCAmelCase : List[str] = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : Optional[int] = FlaxBlenderbotSmallModelTester(self )
def a_ ( self : List[str] ):
"""simple docstring"""
A_ , A_ : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : Tuple = model_class(_lowerCamelCase )
@jax.jit
def encode_jitted(_lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]=None , **_lowerCamelCase : List[str] ):
return model.encode(input_ids=_lowerCamelCase , attention_mask=_lowerCamelCase )
with self.subTest('''JIT Enabled''' ):
A_ : Optional[Any] = encode_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
A_ : List[Any] = encode_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def test_decode( self : Tuple ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
model = model_class(config )
encoder_outputs = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
prepared_inputs_dict = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
return model.decode(
decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
with self.subTest('''JIT Enabled''' ):
jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
self.assertEqual(len(jitted_outputs ) , len(outputs ) )
for jitted_output, output in zip(jitted_outputs , outputs ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def test_model_from_pretrained( self : Tuple ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
input_ids = np.ones((1, 1) ) * model.config.eos_token_id
outputs = model(input_ids )
self.assertIsNotNone(outputs )
| 167
| 0
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
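# Holds a learned mean/std and applies (or inverts) that normalization to CLIP image embeddings.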
class StableUnCLIPImageNormalizer( ModelMixin , ConfigMixin):
@register_to_config
def __init__( self : str , embedding_dim : int = 768 , ):
'''simple docstring'''
super().__init__()
self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
def to( self : List[str] , torch_device : Optional[Union[str, torch.device]] = None , torch_dtype : Optional[torch.dtype] = None , ):
'''simple docstring'''
self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
return self
def scale( self : Optional[Any] , embeds : Any ):
'''simple docstring'''
embeds = (embeds - self.mean) * 1.0 / self.std
return embeds
def unscale( self : Dict , embeds : List[Any] ):
'''simple docstring'''
embeds = (embeds * self.std) + self.mean
return embeds
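# Minimal usage sketch (hypothetical instance and shapes): scaling then unscaling is the identity up to float error,
# e.g. x = torch.randn(2, 768); assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x, atol=1e-5)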
| 258
|
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput( BaseOutput):
images: Union[List[PIL.Image.Image], np.ndarray]
nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 258
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer ( PreTrainedTokenizer ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
padding_side = '''left'''
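# XLNet pads on the left so that the end of the sequence always holds real tokens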
def __init__( self : List[str] , vocab_file : List[Any] , do_lower_case : Union[str, Any]=False , remove_space : Dict=True , keep_accents : str=False , bos_token : Any="<s>" , eos_token : Optional[int]="</s>" , unk_token : Optional[int]="<unk>" , sep_token : List[str]="<sep>" , pad_token : Tuple="<pad>" , cls_token : int="<cls>" , mask_token : List[str]="<mask>" , additional_special_tokens : List[Any]=["<eop>", "<eod>"] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : int , ):
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self._pad_token_type_id = 3
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(vocab_file)
@property
def vocab_size( self : List[str]):
return len(self.sp_model)
def get_vocab( self : List[str]):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Optional[int]):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__( self : Tuple , d : Any):
self.__dict__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def preprocess_text( self : Tuple , inputs : Any):
if self.remove_space:
outputs = " ".join(inputs.strip().split())
else:
outputs = inputs
outputs = outputs.replace("``" , "\"").replace("''" , "\"")
if not self.keep_accents:
outputs = unicodedata.normalize("NFKD" , outputs)
outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
if self.do_lower_case:
outputs = outputs.lower()
return outputs
def _tokenize( self : str , text : str):
text = self.preprocess_text(text)
pieces = self.sp_model.encode(text , out_type=str)
new_pieces = []
for piece in pieces:
if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , ""))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
cur_pieces = cur_pieces[1:]
else:
cur_pieces[0] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(cur_pieces)
else:
new_pieces.append(piece)
return new_pieces
def _convert_token_to_id( self : List[Any] , token : Dict):
return self.sp_model.PieceToId(token)
def _convert_id_to_token( self : int , index : Dict):
return self.sp_model.IdToPiece(index)
def convert_tokens_to_string( self : Dict , tokens : Union[str, Any]):
out_string = "".join(tokens).replace(SPIECE_UNDERLINE , " ").strip()
return out_string
def _decode( self : Any , token_ids : List[int] , skip_special_tokens : bool = False , clean_up_tokenization_spaces : bool = None , spaces_between_special_tokens : bool = True , **kwargs : Optional[int] , ):
self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer" , False)
filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
sub_texts = []
current_sub_text = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
current_sub_text = []
sub_texts.append(token)
else:
current_sub_text.append(token)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
text = "".join(sub_texts)
clean_up_tokenization_spaces = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
def build_inputs_with_special_tokens( self : int , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return token_ids_0 + sep + cls
return token_ids_0 + sep + token_ids_1 + sep + cls
def get_special_tokens_mask( self : Optional[int] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
if token_ids_1 is not None:
return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
return ([0] * len(token_ids_0)) + [1, 1]
def create_token_type_ids_from_sequences( self : Dict , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None):
sep = [self.sep_token_id]
cls_segment_id = [2]
if token_ids_1 is None:
return len(token_ids_0 + sep) * [0] + cls_segment_id
return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
def save_vocabulary( self : Optional[int] , save_directory : str , filename_prefix : Optional[str] = None):
if not os.path.isdir(save_directory):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file , "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
| 13
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
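# Builds a tiny LiltConfig plus random input tensors (ids, bounding boxes, masks, labels) for the tests below.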
class LiltModelTester :
def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000, ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
self.range_bbox = range_bbox
def prepare_config_and_inputs( self):
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def get_config( self):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
def create_and_check_model( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
'''simple docstring'''
model = LiltModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
result = model(input_ids, bbox=bbox)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_token_classification( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
'''simple docstring'''
config.num_labels = self.num_labels
model = LiltForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
'''simple docstring'''
model = LiltForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common( self):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class LiltModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
'''simple docstring'''
return True
def setUp( self):
'''simple docstring'''
self.model_tester = LiltModelTester(self)
self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
def test_config( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def test_model( self):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings( self):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_token_classification( self):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_question_answering( self):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow
def test_model_from_pretrained( self):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = LiltModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest ( unittest.TestCase):
def test_inference_no_head( self):
'''simple docstring'''
model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
input_ids = torch.tensor([[1, 2]], device=torch_device)
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
# forward pass
with torch.no_grad():
outputs = model(input_ids=input_ids, bbox=bbox)
expected_shape = torch.Size([1, 2, 768])
expected_slice = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device, )
self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
| 36
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
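# Lazy import structure: the torch-backed modules registered below are only imported when first accessed.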
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_canine"] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 354
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
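# Produces image-processor kwargs and fake video batches (PIL / numpy / torch) for the tests below.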
class VivitImageProcessingTester ( unittest.TestCase ):
def __init__( self : Union[str, Any] , parent , batch_size=7 , num_channels=3 , num_frames=10 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , crop_size=None , ) -> int:
size = size if size is not None else {'''shortest_edge''': 18}
crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.num_frames = num_frames
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.crop_size = crop_size
def prepare_image_processor_dict( self : List[str] ) -> Union[str, Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
image_processing_class = VivitImageProcessor if is_vision_available() else None
def setUp( self : Union[str, Any] ) -> List[str]:
self.image_processor_tester = VivitImageProcessingTester(self )
@property
def image_processor_dict( self : Union[str, Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties( self : Optional[int] ) -> Optional[Any]:
image_processing = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
self.assertTrue(hasattr(image_processing , '''image_std''' ) )
self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
self.assertTrue(hasattr(image_processing , '''size''' ) )
def test_image_processor_from_dict_with_kwargs( self : int ) -> List[Any]:
image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def test_call_pil( self : List[Any] ) -> Optional[Any]:
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False )
for video in video_inputs:
self.assertIsInstance(video , list )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def test_call_numpy( self : Any ) -> List[str]:
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
for video in video_inputs:
self.assertIsInstance(video , list )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def test_call_pytorch( self : Optional[Any] ) -> int:
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
for video in video_inputs:
self.assertIsInstance(video , list )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 278
| 0
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
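# End-to-end checks for DPMSolverSinglestepScheduler against the dummy model provided by SchedulerCommonTest.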
class DPMSolverSinglestepSchedulerTest ( SchedulerCommonTest ):
scheduler_classes = (DPMSolverSinglestepScheduler,)
forward_default_kwargs = (('''num_inference_steps''', 25),)
def get_scheduler_config( self : Optional[int] , **kwargs : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float("""inf""" ),
'''variance_type''': None,
}
config.update(**kwargs )
return config
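# individual tests override single entries, e.g. self.get_scheduler_config(thresholding=True)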
def check_over_configs( self : Optional[Any] , time_step : Optional[int]=0 , **config : Dict ) -> Any:
'''simple docstring'''
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config )
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname )
new_scheduler = scheduler_class.from_pretrained(tmpdirname )
new_scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output , new_output = sample, sample
for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
output = scheduler.step(residual , t , output , **kwargs ).prev_sample
new_output = new_scheduler.step(residual , t , new_output , **kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def test_from_save_pretrained( self : List[str] ) -> Optional[int]:
'''simple docstring'''
pass
def check_over_forward( self : Optional[Any] , time_step : str=0 , **forward_kwargs : List[Any] ) -> List[Any]:
'''simple docstring'''
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals (must be after setting timesteps)
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname )
new_scheduler = scheduler_class.from_pretrained(tmpdirname )
# copy over dummy past residuals
new_scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residual (must be after setting timesteps)
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def full_loop( self : Dict , scheduler : Optional[int]=None , **config : Dict ) -> int:
'''simple docstring'''
if scheduler is None:
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config )
scheduler = scheduler_class(**scheduler_config )
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config )
scheduler = scheduler_class(**scheduler_config )
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps )
for i, t in enumerate(scheduler.timesteps ):
residual = model(sample , t )
sample = scheduler.step(residual , t , sample ).prev_sample
return sample
def test_full_uneven_loop( self : int ) -> Tuple:
'''simple docstring'''
scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
num_inference_steps = 50
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
residual = model(sample , t )
sample = scheduler.step(residual , t , sample ).prev_sample
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def test_timesteps( self : List[Any] ) -> List[str]:
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=timesteps )
def test_switch( self : int ) -> Optional[int]:
'''simple docstring'''
scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
sample = self.full_loop(scheduler=scheduler )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
scheduler = DEISMultistepScheduler.from_config(scheduler.config )
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
sample = self.full_loop(scheduler=scheduler )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def test_thresholding( self : str ) -> Any:
'''simple docstring'''
self.check_over_configs(thresholding=False )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type="""dpmsolver++""" , solver_order=order , solver_type=solver_type , )
def test_prediction_type( self : str ) -> Any:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def test_solver_order_and_type( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
sample = self.full_loop(
solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
assert not torch.isnan(sample ).any(), "Samples have nan numbers"
def test_lower_order_final( self : Dict ) -> Any:
'''simple docstring'''
self.check_over_configs(lower_order_final=True )
self.check_over_configs(lower_order_final=False )
def test_lambda_min_clipped( self : Any ) -> List[str]:
'''simple docstring'''
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def test_variance_type( self : List[Any] ) -> List[Any]:
'''simple docstring'''
self.check_over_configs(variance_type=None )
self.check_over_configs(variance_type="""learned_range""" )
def test_inference_steps( self : List[str] ) -> List[str]:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
def test_full_loop_no_noise( self : List[str] ) -> str:
'''simple docstring'''
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def test_full_loop_with_karras( self : Optional[int] ) -> Tuple:
'''simple docstring'''
sample = self.full_loop(use_karras_sigmas=True )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def test_full_loop_with_v_prediction( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
sample = self.full_loop(prediction_type="""v_prediction""" )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def test_full_loop_with_karras_and_v_prediction( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
sample = self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=True )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def test_fp16_support( self : Any ) -> Dict:
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
scheduler = scheduler_class(**scheduler_config )
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter.half()
scheduler.set_timesteps(num_inference_steps )
for i, t in enumerate(scheduler.timesteps ):
residual = model(sample , t )
sample = scheduler.step(residual , t , sample ).prev_sample
assert sample.dtype == torch.float16
| 173
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
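# Seq2SeqDataset below lazily reads parallel .source/.target line files via linecache.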
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
"""simple docstring"""
extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(''' ''' ) else {}
tokenizer.padding_side = padding_side
return tokenizer(
[line] , max_length=max_length , padding='''max_length''' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
"""simple docstring"""
keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset ( Dataset ):
'''simple docstring'''
def __init__( self : str, tokenizer : Union[str, Any], data_dir : Dict, max_source_length : Union[str, Any], max_target_length : Dict, type_path : str="train", n_obs : List[Any]=None, src_lang : Tuple=None, tgt_lang : List[str]=None, prefix : int="", )-> List[Any]:
super().__init__()
self.src_file = Path(data_dir ).joinpath(type_path + '''.source''' )
self.tgt_file = Path(data_dir ).joinpath(type_path + '''.target''' )
self.src_lens = self.get_char_lens(self.src_file )
self.max_source_length = max_source_length
self.max_target_length = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
self.tokenizer = tokenizer
self.prefix = prefix
if n_obs is not None:
self.src_lens = self.src_lens[:n_obs]
self.src_lang = src_lang
self.tgt_lang = tgt_lang
def __len__( self : Dict )-> Optional[int]:
return len(self.src_lens )
def __getitem__( self : List[str], index : Optional[int] )-> Dict[str, torch.Tensor]:
index = index + 1 # linecache starts at 1
source_line = self.prefix + linecache.getline(str(self.src_file ), index ).rstrip('''\n''' )
tgt_line = linecache.getline(str(self.tgt_file ), index ).rstrip('''\n''' )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer, T5Tokenizer ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
source_tokenizer = (
self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer ) else self.tokenizer
)
target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer ) else self.tokenizer
source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, '''right''' )
target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, '''right''' )
source_ids = source_inputs['''input_ids'''].squeeze()
target_ids = target_inputs['''input_ids'''].squeeze()
src_mask = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def get_char_lens( data_file : Union[str, Any] )-> Optional[int]:
return [len(x ) for x in Path(data_file ).open().readlines()]
def collate_fn( self : str, batch : str )-> Dict[str, torch.Tensor]:
input_ids = torch.stack([x['''input_ids'''] for x in batch] )
masks = torch.stack([x['''attention_mask'''] for x in batch] )
target_ids = torch.stack([x['''decoder_input_ids'''] for x in batch] )
tgt_pad_token_id = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer, RagTokenizer )
else self.tokenizer.pad_token_id
)
src_pad_token_id = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer, RagTokenizer )
else self.tokenizer.pad_token_id
)
y = trim_batch(target_ids, tgt_pad_token_id )
source_ids , source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks )
batch = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
logger = getLogger(__name__)
def flatten_list( lst : List[List] ):
"""simple docstring"""
return list(itertools.chain.from_iterable(lst ) )
def save_git_info( folder_path : str ):
"""simple docstring"""
repo_infos = get_git_info()
save_json(repo_infos , os.path.join(folder_path , '''git_log.json''' ) )
def save_json( content : Union[str, Any] , path : Optional[int] , indent : Dict=4 , **json_dump_kwargs : int ):
"""simple docstring"""
with open(path , '''w''' ) as f:
json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json( path : List[Any] ):
"""simple docstring"""
with open(path ) as f:
return json.load(f )
def get_git_info( ):
"""simple docstring"""
repo = git.Repo(search_parent_directories=True )
repo_infos = {
'''repo_id''': str(repo ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def lmap( f : Callable , x : Iterable ):
"""simple docstring"""
return list(map(f , x ) )
def pickle_save( obj : Any , path : List[Any] ):
"""simple docstring"""
with open(path , '''wb''' ) as f:
return pickle.dump(obj , f )
def normalize_answer( s : str ):
"""simple docstring"""
def remove_articles(text : List[Any] ):
return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , text )
def white_space_fix(text : Any ):
return " ".join(text.split() )
def remove_punc(text : Optional[Any] ):
exclude = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(text : Any ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
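# e.g. normalize_answer("The  Cat!") == "cat" (lowercased, punctuation and articles stripped, whitespace collapsed)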
def f1_score( prediction : List[str] , ground_truth : List[str] ):
"""simple docstring"""
prediction_tokens = normalize_answer(prediction ).split()
ground_truth_tokens = normalize_answer(ground_truth ).split()
common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
num_same = sum(common.values() )
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens )
recall = 1.0 * num_same / len(ground_truth_tokens )
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score( prediction : Dict , ground_truth : int ):
"""simple docstring"""
return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns : List[str] , reference_lns : List[str] ):
"""simple docstring"""
assert len(output_lns ) == len(reference_lns )
em = 0
for hypo, pred in zip(output_lns , reference_lns ):
em += exact_match_score(hypo , pred )
if len(output_lns ) > 0:
em /= len(output_lns )
return {"em": em}
def is_rag_model( model_prefix : List[str] ):
"""simple docstring"""
return model_prefix.startswith('''rag''' )
def set_extra_model_params( extra_params : Any , hparams : List[str] , config : str ):
"""simple docstring"""
equivalent_param = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
equivalent_param['''dropout'''] = '''dropout_rate'''
for p in extra_params:
if getattr(hparams , p , None ):
if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(p ) )
delattr(hparams , p )
continue
set_p = p if hasattr(config , p ) else equivalent_param[p]
setattr(config , set_p , getattr(hparams , p ) )
delattr(hparams , p )
return hparams, config
| 238
| 0
|
"""simple docstring"""
def manhattan_distance( point_a , point_b )-> float:
'''simple docstring'''
_validate_point(point_a )
_validate_point(point_b )
if len(point_a ) != len(point_b ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(point_a , point_b ) ) )
def _validate_point( point )-> None:
'''simple docstring'''
if point:
if isinstance(point , list ):
for item in point:
if not isinstance(item , (int, float) ):
msg = (
"Expected a list of numbers as input, found "
F'''{type(item ).__name__}'''
)
raise TypeError(msg )
else:
msg = F'''Expected a list of numbers as input, found {type(point ).__name__}'''
raise TypeError(msg )
else:
raise ValueError("Missing an input" )
def manhattan_distance_one_liner( point_a , point_b )-> float:
'''simple docstring'''
_validate_point(point_a )
_validate_point(point_b )
if len(point_a ) != len(point_b ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(point_a , point_b ) ) )
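# e.g. manhattan_distance([1, 1], [9, 9]) == 16.0; both variants agree on every valid input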
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363
|
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
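# create_components builds a minimal model/optimizer/scheduler/dataloader bundle shared by the tests below.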
def create_components( )-> Union[str, Any]:
'''simple docstring'''
model = torch.nn.Linear(2 , 4 )
optimizer = torch.optim.AdamW(model.parameters() , lr=1.0 )
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def get_signature( model )-> List[Any]:
'''simple docstring'''
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
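# this scalar changes whenever any weight or bias changes, which is enough to detect a (re)load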
def load_random_weights( model )-> Tuple:
'''simple docstring'''
state = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(state )
class AcceleratorTester ( AccelerateTestCase ):
"""simple docstring"""
@require_cuda
def test_accelerator_can_be_reinstantiated( self : Any):
accelerator = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(ValueError):
accelerator = Accelerator(cpu=True)
def test_mutable_states( self : List[Any]):
accelerator = Accelerator()
state = GradientState()
assert state.num_steps == 1
accelerator.gradient_accumulation_steps = 4
assert state.num_steps == 4
assert state.sync_gradients is True
accelerator.sync_gradients = False
assert state.sync_gradients is False
GradientState._reset_state()
def test_prepared_objects_are_referenced( self : str):
accelerator = Accelerator()
model , optimizer , scheduler , train_dl , valid_dl = create_components()
(
prepared_model,
prepared_optimizer,
prepared_scheduler,
prepared_train_dl,
prepared_valid_dl,
) = accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl)
self.assertTrue(prepared_model in accelerator._models)
self.assertTrue(prepared_optimizer in accelerator._optimizers)
self.assertTrue(prepared_scheduler in accelerator._schedulers)
self.assertTrue(prepared_train_dl in accelerator._dataloaders)
self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
def test_free_memory_dereferences_prepared_components( self : Dict):
accelerator = Accelerator()
model , optimizer , scheduler , train_dl , valid_dl = create_components()
accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl)
accelerator.free_memory()
self.assertTrue(len(accelerator._models) == 0)
self.assertTrue(len(accelerator._optimizers) == 0)
self.assertTrue(len(accelerator._schedulers) == 0)
self.assertTrue(len(accelerator._dataloaders) == 0)
def test_env_var_device( self : int):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*args : Union[str, Any] , **kwargs : Any):
pass
with patch("torch.cuda.set_device" , noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
accelerator = Accelerator()
self.assertEqual(str(accelerator.state.device) , "cuda:64")
def test_save_load_model( self : List[str]):
accelerator = Accelerator()
model , optimizer , scheduler , train_dl , valid_dl = create_components()
accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl)
model_signature = get_signature(model)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(tmpdirname)
# make sure random weights don't match
load_random_weights(model)
self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
# make sure loaded weights match
accelerator.load_state(tmpdirname)
self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
def test_save_load_model_with_hooks( self : Optional[int]):
accelerator = Accelerator()
model , optimizer , scheduler , train_dl , valid_dl = create_components()
accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl)
model_signature = get_signature(model)
# saving hook
def save_config(models : Tuple , weights : int , output_dir : Union[str, Any]):
config = {"class_name": models[0].__class__.__name__}
with open(os.path.join(output_dir , "data.json") , "w") as f:
json.dump(config , f)
# loading hook
def load_config(models : Dict , input_dir : Union[str, Any]):
with open(os.path.join(input_dir , "data.json") , "r") as f:
config = json.load(f)
models[0].class_name = config["class_name"]
save_hook = accelerator.register_save_state_pre_hook(save_config)
load_hook = accelerator.register_load_state_pre_hook(load_config)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(tmpdirname)
# make sure random weights don't match with hooks
load_random_weights(model)
self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
# random class name to verify correct one is loaded
model.class_name = "random"
# make sure loaded weights match with hooks
accelerator.load_state(tmpdirname)
self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__)
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(tmpdirname)
# make sure random weights don't match with hooks removed
load_random_weights(model)
self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
# random class name to verify correct one is loaded
model.class_name = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(tmpdirname)
self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__)
def test_accelerator_none( self : Optional[Any]):
accelerator = Accelerator()
model , optimizer , scheduler , train_dl , valid_dl = create_components()
dummy_obj = None
# This should work
model , optimizer , scheduler , train_dl , valid_dl , dummy_obj = accelerator.prepare(
model , optimizer , scheduler , train_dl , valid_dl , dummy_obj)
self.assertTrue(dummy_obj is None)
def test_is_accelerate_prepared( self : List[str]):
accelerator = Accelerator()
model , optimizer , scheduler , train_dl , valid_dl = create_components()
dummy_obj = [1, 2, 3]
# This should work
model , optimizer , scheduler , train_dl , valid_dl , dummy_obj = accelerator.prepare(
model , optimizer , scheduler , train_dl , valid_dl , dummy_obj)
self.assertEqual(
getattr(dummy_obj , "_is_accelerate_prepared" , False) , False , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(model , "_is_accelerate_prepared" , False) , True , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(optimizer , "_is_accelerate_prepared" , False) , True , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(scheduler , "_is_accelerate_prepared" , False) , True , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(train_dl , "_is_accelerate_prepared" , False) , True , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(valid_dl , "_is_accelerate_prepared" , False) , True , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def __snake_case ( self : Optional[int]):
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_8bit=True , device_map={"": 0} , )
accelerator = Accelerator()
# This should work
model = accelerator.prepare(model)
@slow
@require_bnb
def __snake_case ( self : Optional[int]):
from transformers import AutoModelForCausalLM
accelerator = Accelerator()
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
device_map = infer_auto_device_map(model)
device_map["lm_head"] = "cpu"
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=device_map , load_in_8bit=True , llm_int8_enable_fp32_cpu_offload=True)
# This should not work and get value error
with self.assertRaises(ValueError):
model = accelerator.prepare(model)
@slow
@require_bnb
@require_multi_gpu
def __snake_case ( self : Optional[int]):
from transformers import AutoModelForCausalLM
PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
device_map = infer_auto_device_map(model)
device_map["lm_head"] = 1
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_8bit=True , device_map=device_map , )
accelerator = Accelerator()
# This should not work and get value error
with self.assertRaises(ValueError):
model = accelerator.prepare(model)
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __snake_case ( self : Tuple):
from transformers import AutoModelForCausalLM
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
device_map = infer_auto_device_map(model)
device_map["lm_head"] = 1
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_8bit=True , device_map=device_map , )
accelerator = Accelerator()
# This should work
model = accelerator.prepare(model)
@require_cuda
def __snake_case ( self : List[Any]):
model = torch.nn.Linear(10 , 10)
sgd = torch.optim.SGD(model.parameters() , lr=0.01)
accelerator = Accelerator(cpu=True)
_ = accelerator.prepare(sgd)
| 226
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class __lowercase (PretrainedConfig ):
model_type = """trajectory_transformer"""
keys_to_ignore_at_inference = ["""past_key_values"""]
attribute_map = {
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , vocab_size=100 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=249 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=128 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0_006 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=5_0256 , eos_token_id=5_0256 , **kwargs , ) ->int:
'''simple docstring'''
self.vocab_size = vocab_size
self.action_weight = action_weight
self.reward_weight = reward_weight
self.value_weight = value_weight
self.max_position_embeddings = max_position_embeddings
self.block_size = block_size
self.action_dim = action_dim
self.observation_dim = observation_dim
self.transition_dim = transition_dim
self.learning_rate = learning_rate
self.n_layer = n_layer
self.n_head = n_head
self.n_embd = n_embd
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.resid_pdrop = resid_pdrop
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.kaiming_initializer_range = kaiming_initializer_range
self.use_cache = use_cache
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
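# Usage sketch (added illustration, not part of the original file): the
# config can be instantiated with defaults and individual fields overridden.
# cfg = __lowercase(n_layer=6)
# assert cfg.n_layer == 6 and cfg.model_type == "trajectory_transformer"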
| 275
|
def euclidean_distance_sqr( point1 , point2 ):
return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2
def column_based_sort( array , column=0 ):
return sorted(array , key=lambda x : x[column] )
def dis_between_closest_pair( points , points_counts , min_dis=float('''inf''' ) ):
for i in range(points_counts - 1 ):
for j in range(i + 1 , points_counts ):
current_dis = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
min_dis = current_dis
return min_dis
def dis_between_closest_in_strip( points , points_counts , min_dis=float('''inf''' ) ):
for i in range(min(6 , points_counts - 1 ) , points_counts ):
for j in range(max(0 , i - 6 ) , i ):
current_dis = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
min_dis = current_dis
return min_dis
def closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y , points_counts ):
# base case
if points_counts <= 3:
return dis_between_closest_pair(points_sorted_on_x , points_counts )
# recursion
mid = points_counts // 2
closest_in_left = closest_pair_of_points_sqr(
points_sorted_on_x , points_sorted_on_y[:mid] , mid )
closest_in_right = closest_pair_of_points_sqr(
points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid )
closest_pair_dis = min(closest_in_left , closest_in_right )
cross_strip = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(point )
closest_in_strip = dis_between_closest_in_strip(
cross_strip , len(cross_strip ) , closest_pair_dis )
return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points( points , points_counts ):
points_sorted_on_x = column_based_sort(points , column=0 )
points_sorted_on_y = column_based_sort(points , column=1 )
return (
closest_pair_of_points_sqr(
points_sorted_on_x , points_sorted_on_y , points_counts )
) ** 0.5
if __name__ == "__main__":
points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 275
| 1
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
def _lowercase ( self : Optional[Any] ):
self.test_model = SMALL_MODEL_IDENTIFIER
self.framework_pt = "pt"
self.framework_tf = "tf"
def _setup_pt_ckpt( self , save_dir ):
model_pt = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(save_dir )
def _setup_tf_ckpt( self , save_dir ):
model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
model_tf.save_pretrained(save_dir )
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = "mock_framework"
# Framework provided - return whatever the user provides
snake_case__ : Optional[Any] = FeaturesManager.determine_framework(self.test_model , __A )
self.assertEqual(__A , __A )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__A )
snake_case__ : Optional[int] = FeaturesManager.determine_framework(__A , __A )
self.assertEqual(__A , __A )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__A )
snake_case__ : int = FeaturesManager.determine_framework(__A , __A )
self.assertEqual(__A , __A )
def _lowercase ( self : Dict ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__A )
snake_case__ : List[str] = FeaturesManager.determine_framework(__A )
self.assertEqual(__A , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__A )
snake_case__ : Tuple = FeaturesManager.determine_framework(__A )
self.assertEqual(__A , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__A ):
snake_case__ : int = FeaturesManager.determine_framework(__A )
def _lowercase ( self : Dict ):
snake_case__ : Dict = MagicMock(return_value=__A )
with patch("transformers.onnx.features.is_tf_available" , __A ):
snake_case__ : List[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__A , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
snake_case__ : Tuple = MagicMock(return_value=__A )
with patch("transformers.onnx.features.is_torch_available" , __A ):
snake_case__ : int = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__A , self.framework_tf )
# Both in environment -> use PyTorch
snake_case__ : Dict = MagicMock(return_value=__A )
snake_case__ : Optional[int] = MagicMock(return_value=__A )
with patch("transformers.onnx.features.is_tf_available" , __A ), patch(
"transformers.onnx.features.is_torch_available" , __A ):
snake_case__ : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__A , self.framework_pt )
# Both not in environment -> raise error
snake_case__ : List[str] = MagicMock(return_value=__A )
snake_case__ : Optional[Any] = MagicMock(return_value=__A )
with patch("transformers.onnx.features.is_tf_available" , __A ), patch(
"transformers.onnx.features.is_torch_available" , __A ):
with self.assertRaises(__A ):
snake_case__ : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
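# Added note: the tests above pin down the resolution order of
# `determine_framework` -- an explicit user choice wins, then the format of
# a local checkpoint, then whichever of PyTorch/TensorFlow is installed
# (PyTorch preferred when both are available); an error is raised when
# neither is present.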
| 366
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : str , __A : Optional[Any]=1_3 , __A : Dict=7 , __A : List[str]=True , __A : Any=True , __A : str=True , __A : Optional[Any]=True , __A : List[str]=9_9 , __A : Dict=3_2 , __A : Tuple=2 , __A : Tuple=4 , __A : Dict=3_7 , __A : Tuple="gelu" , __A : Any=0.1 , __A : str=0.1 , __A : int=5_1_2 , __A : Union[str, Any]=1_6 , __A : Optional[int]=2 , __A : Union[str, Any]=0.0_2 , __A : Tuple=3 , __A : Union[str, Any]=4 , __A : Optional[int]=None , ):
snake_case__ : Optional[int] = parent
snake_case__ : Optional[Any] = 1_3
snake_case__ : int = 7
snake_case__ : Optional[int] = True
snake_case__ : Optional[Any] = True
snake_case__ : List[str] = True
snake_case__ : int = True
snake_case__ : Optional[int] = 9_9
snake_case__ : Union[str, Any] = 3_8_4
snake_case__ : Optional[Any] = 2
snake_case__ : Union[str, Any] = 4
snake_case__ : Any = 3_7
snake_case__ : Any = "gelu"
snake_case__ : str = 0.1
snake_case__ : Optional[Any] = 0.1
snake_case__ : Union[str, Any] = 5_1_2
snake_case__ : Optional[Any] = 1_6
snake_case__ : List[Any] = 2
snake_case__ : Optional[int] = 0.0_2
snake_case__ : Dict = 3
snake_case__ : Any = 4
snake_case__ : int = 1_2_8
snake_case__ : Dict = 2
snake_case__ : Any = 9
snake_case__ : List[str] = 1
snake_case__ : List[Any] = None
def _lowercase ( self : List[str] ):
snake_case__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : str = None
if self.use_input_mask:
snake_case__ : str = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Union[str, Any] = None
if self.use_token_type_ids:
snake_case__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : Optional[Any] = None
snake_case__ : Any = None
snake_case__ : Tuple = None
if self.use_labels:
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : int = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : int = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : Dict , __A : Dict , __A : Dict , __A : Union[str, Any] , __A : Optional[int] , __A : Any , __A : Union[str, Any] , __A : Tuple ):
snake_case__ : Optional[int] = TFConvBertModel(config=__A )
snake_case__ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case__ : List[str] = [input_ids, input_mask]
snake_case__ : Union[str, Any] = model(__A )
snake_case__ : str = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Union[str, Any] , __A : List[Any] , __A : Any , __A : Union[str, Any] , __A : int , __A : Optional[Any] , __A : Dict , __A : Optional[int] ):
snake_case__ : List[str] = TFConvBertForMaskedLM(config=__A )
snake_case__ : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
snake_case__ : int = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : Tuple , __A : Union[str, Any] , __A : List[Any] , __A : Any , __A : List[Any] , __A : List[Any] , __A : Optional[int] , __A : List[str] ):
snake_case__ : Any = self.num_labels
snake_case__ : List[Any] = TFConvBertForSequenceClassification(config=__A )
snake_case__ : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
snake_case__ : Optional[int] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : int , __A : List[Any] , __A : Union[str, Any] , __A : Optional[Any] , __A : List[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : Optional[int] ):
snake_case__ : Optional[Any] = self.num_choices
snake_case__ : Any = TFConvBertForMultipleChoice(config=__A )
snake_case__ : Optional[int] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
snake_case__ : Optional[Any] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
snake_case__ : Optional[int] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
snake_case__ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
snake_case__ : Optional[Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : List[str] , __A : Tuple , __A : str , __A : Union[str, Any] , __A : Union[str, Any] , __A : Any , __A : int , __A : Tuple ):
snake_case__ : Dict = self.num_labels
snake_case__ : str = TFConvBertForTokenClassification(config=__A )
snake_case__ : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
snake_case__ : List[str] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : Optional[int] , __A : Union[str, Any] , __A : List[Any] , __A : List[str] , __A : Any , __A : Any , __A : Optional[int] , __A : Optional[Any] ):
snake_case__ : Any = TFConvBertForQuestionAnswering(config=__A )
snake_case__ : List[str] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
snake_case__ : int = model(__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : Any ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
(
(
snake_case__
), (
snake_case__
), (
snake_case__
), (
snake_case__
), (
snake_case__
), (
snake_case__
), (
snake_case__
),
) : List[str] = config_and_inputs
snake_case__ : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a_ = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ = False
a_ = False
a_ = False
def _lowercase ( self : int ):
snake_case__ : Optional[Any] = TFConvBertModelTester(self )
snake_case__ : List[str] = ConfigTester(self , config_class=__A , hidden_size=3_7 )
def _lowercase ( self : List[Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Any ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def _lowercase ( self : Dict ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
def _lowercase ( self : Optional[Any] ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def _lowercase ( self : Optional[int] ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def _lowercase ( self : Dict ):
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def _lowercase ( self : Dict ):
snake_case__, snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = True
snake_case__ : int = True
if hasattr(__A , "use_cache" ):
snake_case__ : Optional[Any] = True
snake_case__ : Dict = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
snake_case__ : List[str] = getattr(self.model_tester , "key_length" , __A )
for model_class in self.all_model_classes:
snake_case__ : Tuple = self._prepare_for_class(__A , __A )
snake_case__ : List[str] = model_class(__A )
snake_case__ : List[Any] = len(model(__A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A , saved_model=__A )
snake_case__ : str = os.path.join(__A , "saved_model" , "1" )
snake_case__ : str = tf.keras.models.load_model(__A )
snake_case__ : Optional[Any] = model(__A )
if self.is_encoder_decoder:
snake_case__ : Tuple = outputs["encoder_hidden_states"]
snake_case__ : str = outputs["encoder_attentions"]
else:
snake_case__ : Dict = outputs["hidden_states"]
snake_case__ : Tuple = outputs["attentions"]
self.assertEqual(len(__A ) , __A )
snake_case__ : int = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__A ) , __A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def _lowercase ( self : Tuple ):
snake_case__ : Optional[Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__A )
def _lowercase ( self : List[str] ):
snake_case__, snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Optional[Any] = True
snake_case__ : List[Any] = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
snake_case__ : int = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
snake_case__ : Any = getattr(self.model_tester , "key_length" , __A )
snake_case__ : List[Any] = getattr(self.model_tester , "key_length" , __A )
def check_decoder_attentions_output(__A : Optional[int] ):
snake_case__ : Optional[Any] = len(__A )
self.assertEqual(out_len % 2 , 0 )
snake_case__ : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__A : Any ):
snake_case__ : List[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = True
snake_case__ : Any = False
snake_case__ : Dict = model_class(__A )
snake_case__ : List[Any] = model(self._prepare_for_class(__A , __A ) )
snake_case__ : Dict = len(__A )
self.assertEqual(config.output_hidden_states , __A )
check_encoder_attentions_output(__A )
if self.is_encoder_decoder:
snake_case__ : str = model_class(__A )
snake_case__ : List[Any] = model(self._prepare_for_class(__A , __A ) )
self.assertEqual(config.output_hidden_states , __A )
check_decoder_attentions_output(__A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
snake_case__ : Optional[int] = True
snake_case__ : Optional[Any] = model_class(__A )
snake_case__ : Union[str, Any] = model(self._prepare_for_class(__A , __A ) )
self.assertEqual(config.output_hidden_states , __A )
check_encoder_attentions_output(__A )
# Check attention is always last and order is fine
snake_case__ : Optional[int] = True
snake_case__ : List[Any] = True
snake_case__ : Any = model_class(__A )
snake_case__ : str = model(self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__A ) )
self.assertEqual(model.config.output_hidden_states , __A )
check_encoder_attentions_output(__A )
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : int ):
snake_case__ : int = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
snake_case__ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case__ : str = model(__A )[0]
snake_case__ : int = [1, 6, 7_6_8]
self.assertEqual(output.shape , __A )
snake_case__ : List[Any] = tf.constant(
[
[
[-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
[0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
[0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __A , atol=1e-4 )
| 286
| 0
|
from __future__ import annotations
def fractional_knapsack( value: list[int] , weight: list[int] , capacity: int ) -> tuple[float, list[float]]:
index = list(range(len(value ) ) )
ratio = [v / w for v, w in zip(value , weight )]
index.sort(key=lambda i : ratio[i] , reverse=True )
max_value: float = 0
fractions: list[float] = [0] * len(value )
for i in index:
if weight[i] <= capacity:
fractions[i] = 1
max_value += value[i]
capacity -= weight[i]
else:
fractions[i] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
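# Worked example (added sketch): for values [60, 100, 120], weights
# [10, 20, 30] and capacity 50, the greedy-by-ratio solution takes items 0
# and 1 whole and two thirds of item 2, for a total value of 240.0.
_value, _fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
assert _value == 240.0 and _fractions[:2] == [1, 1]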
| 336
|
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
test_data = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.0_0_9
def _error( example_no , data_set="train" ):
return calculate_hypothesis_value(example_no , data_set ) - output(
example_no , data_set )
def _hypothesis_value( data_input_tuple ):
hyp_val = 0
for i in range(len(parameter_vector ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def output( example_no , data_set ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def calculate_hypothesis_value( example_no , data_set ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def summation_of_cost_derivative( index , end=m ):
summation_value = 0
for i in range(end ):
if index == -1:
summation_value += _error(i )
else:
summation_value += _error(i ) * train_data[i][0][index]
return summation_value
def get_cost_derivative( index ):
cost_derivative_value = summation_of_cost_derivative(index , m ) / m
return cost_derivative_value
def run_gradient_descent():
global parameter_vector
# Tune these values to set a tolerance value for predicted output
absolute_error_limit = 0.000002
relative_error_limit = 0
j = 0
while True:
j += 1
temp_parameter_vector = [0, 0, 0, 0]
for i in range(0 , len(parameter_vector ) ):
cost_derivative = get_cost_derivative(i - 1 )
temp_parameter_vector[i] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
break
parameter_vector = temp_parameter_vector
print(('''Number of iterations:''', j) )
def test_gradient_descent():
for i in range(len(test_data ) ):
print(('''Actual output value:''', output(i , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(i , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
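# Minimal self-contained sketch (added) of the update rule used above:
# theta <- theta - learning_rate * dJ/dtheta, shown for the 1-D objective
# (theta - 3)^2, which converges to theta ~= 3.
def _demo_one_dimensional_descent(learning_rate=0.1, steps=100):
    theta = 0.0
    for _ in range(steps):
        gradient = 2 * (theta - 3)  # derivative of (theta - 3) ** 2
        theta -= learning_rate * gradient
    return theta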
| 336
| 1
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small" , return_dict=True ).to(torch_device )
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
input_ids = tokenizer("Hello there" , return_tensors="pt" ).input_ids
labels = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
mtf_score = -(labels.shape[-1] * loss.item())
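# Added note: the loss above is token-averaged cross-entropy, so scaling it
# by the label length and negating recovers the summed log-likelihood that
# EXPECTED_SCORE was computed with.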
EXPECTED_SCORE = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 199
|
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm( number: int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
_lowercase : Union[str, Any] = 0
while number:
number &= number - 1
result += 1
return result
def get_set_bits_count_using_modulo_operator( number: int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
_lowercase : int = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def benchmark() -> None:
def do_benchmark(number: int ) -> None:
setup = "import __main__ as z"
print(F'''Benchmark when {number = }:''' )
print(F'''{get_set_bits_count_using_modulo_operator(number ) = }''' )
timing = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=setup )
print(F'''timeit() runs in {timing} seconds''' )
print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }''' )
timing = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=setup , )
print(F'''timeit() runs in {timing} seconds''' )
for number in (25, 37, 58, 0):
do_benchmark(number )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
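# Sanity-check sketch (added): both implementations should agree with the
# reference count of "1" digits in the binary representation.
for _n in (0, 1, 25, 37, 58, 255):
    assert get_set_bits_count_using_brian_kernighans_algorithm(_n) == bin(_n).count("1")
    assert get_set_bits_count_using_modulo_operator(_n) == bin(_n).count("1")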
| 199
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ (self ) -> str:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
cset = [[1, 2, 4], [1, 2, 3, 4]]
dc = DisjunctiveConstraint(cset )
self.assertTrue(isinstance(dc.token_ids , list ) )
with self.assertRaises(ValueError ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(ValueError ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def lowerCAmelCase_ (self ) -> Dict:
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
cset = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(ValueError ):
DisjunctiveConstraint(cset ) # fails here
def lowerCAmelCase_ (self ) -> Any:
cset = [[1, 2, 3], [1, 2, 4]]
dc = DisjunctiveConstraint(cset )
stepped , completed , reset = dc.update(1 )
desired = stepped is True and completed is False and reset is False
self.assertTrue(desired )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
stepped , completed , reset = dc.update(2 )
desired = stepped is True and completed is False and reset is False
self.assertTrue(desired )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
stepped , completed , reset = dc.update(3 )
desired = stepped is True and completed is True and reset is False
self.assertTrue(desired )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def lowerCAmelCase_ (self ) -> List[str]:
cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
dc = DisjunctiveConstraint(cset )
stepped , completed , reset = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
stepped , completed , reset = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
stepped , completed , reset = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
stepped , completed , reset = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
stepped , completed , reset = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
stepped , completed , reset = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
stepped , completed , reset = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
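# Added note: `dc.update(token_id)` returns the triple (stepped, completed,
# reset) -- `stepped`: the token advanced one of the disjunctive branches,
# `completed`: a full branch has now been matched, `reset`: the token broke
# the running match and progress tracking started over.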
| 333
|
def generate_large_matrix() -> list[list[int]]:
'''simple docstring'''
return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid( grid: list[list[int]] ) -> None:
'''simple docstring'''
assert all(row == sorted(row , reverse=True ) for row in grid )
assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index( array: list[int] ) -> int:
'''simple docstring'''
left = 0
right = len(array ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
mid = (left + right) // 2
num = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
left = mid + 1
else:
right = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(array )
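# Worked example (added note): for the row [4, 3, 2, -1] the search returns
# index 3, the position of the first negative entry, so the row contributes
# len(row) - 3 = 1 to the total count of negatives.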
def count_negatives_binary_search( grid: list[list[int]] ) -> int:
'''simple docstring'''
total = 0
bound = len(grid[0] )
for i in range(len(grid ) ):
bound = find_negative_index(grid[i][:bound] )
total += bound
return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force( grid: list[list[int]] ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break( grid: list[list[int]] ) -> int:
'''simple docstring'''
total = 0
for row in grid:
for i, number in enumerate(row ):
if number < 0:
total += len(row ) - i
break
return total
def benchmark() -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
setup = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
time = timeit(f'''{func}(grid=grid)''' , setup=setup , number=5_0_0 )
print(f'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 333
| 1
|
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job( job ):
"""simple docstring"""
job_info = {}
start = job["started_at"]
end = job["completed_at"]
start_datetime = date_parser.parse(start )
end_datetime = date_parser.parse(end )
duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
job_info["started_at"] = start
job_info["completed_at"] = end
job_info["duration"] = duration_in_min
return job_info
def get_job_time( workflow_run_id , token=None ):
"""simple docstring"""
headers = None
if token is not None:
headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
result = requests.get(url , headers=headers ).json()
job_time = {}
try:
job_time.update({job["name"]: extract_time_from_single_job(job ) for job in result["jobs"]} )
pages_to_iterate_over = math.ceil((result["total_count"] - 1_0_0) / 1_0_0 )
for i in range(pages_to_iterate_over ):
result = requests.get(url + F'''&page={i + 2}''' , headers=headers ).json()
job_time.update({job["name"]: extract_time_from_single_job(job ) for job in result["jobs"]} )
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
SCREAMING_SNAKE_CASE :List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE :Optional[int] = get_job_time(args.workflow_run_id)
SCREAMING_SNAKE_CASE :int = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'''{k}: {v["duration"]}''')
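# Added note: the GitHub Actions API caps `per_page` at 100, which is why
# `get_job_time` fetches ceil((total_count - 100) / 100) extra pages after
# the first request.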
| 124
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']
X_train , X_test , y_train , y_test = train_test_split(X, y)
def euclidean_distance( a , b ):
"""simple docstring"""
return np.linalg.norm(np.array(a ) - np.array(b ) )
def classifier( train_data , train_target , classes , point , k=5 ):
"""simple docstring"""
data = zip(train_data , train_target )
# List of distances of all points from the point to be classified
distances = []
for data_point in data:
distance = euclidean_distance(data_point[0] , point )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
votes = [i[1] for i in sorted(distances )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
result = Counter(votes ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
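# Illustrative check (added): the distance helper used by the classifier is
# the ordinary Euclidean norm, e.g. from (0, 0) to (3, 4) it is exactly 5.
assert euclidean_distance((0, 0), (3, 4)) == 5.0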
| 124
| 1
|
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __snake_case ( _lowerCamelCase ):
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[Any] = tempfile.mkdtemp()
snake_case__ : Optional[int] = 5
# Realm tok
snake_case__ : str = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
snake_case__ : List[str] = os.path.join(self.tmpdirname , 'realm_tokenizer' )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
snake_case__ : List[str] = os.path.join(__UpperCamelCase , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
snake_case__ : Dict = os.path.join(self.tmpdirname , 'realm_block_records' )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
def __a ( self ) -> RealmTokenizer:
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Any = RealmConfig(num_block_records=self.num_block_records )
return config
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : int = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
} )
return dataset
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Dict = np.array(
[
B'This is the first record',
B'This is the second record',
B'This is the third record',
B'This is the fourth record',
B'This is the fifth record',
B'This is a longer longer longer record',
] , dtype=__UpperCamelCase , )
return block_records
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Dict = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : int = self.get_config()
snake_case__ : int = self.get_dummy_retriever()
snake_case__ : Union[str, Any] = retriever.tokenizer
snake_case__ : Optional[int] = np.array([0, 3] , dtype='long' )
snake_case__ : Optional[int] = tokenizer(['Test question'] ).input_ids
snake_case__ : Union[str, Any] = tokenizer(
['the fourth'] , add_special_tokens=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_attention_mask=__UpperCamelCase , ).input_ids
snake_case__ : Tuple = config.reader_seq_len
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Tuple = retriever(
__UpperCamelCase , __UpperCamelCase , answer_ids=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors='np' )
self.assertEqual(len(__UpperCamelCase ) , 2 )
self.assertEqual(len(__UpperCamelCase ) , 2 )
self.assertEqual(len(__UpperCamelCase ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Any = self.get_config()
snake_case__ : Optional[int] = self.get_dummy_retriever()
snake_case__ : Tuple = retriever.tokenizer
snake_case__ : Optional[int] = np.array([0, 3, 5] , dtype='long' )
snake_case__ : Tuple = tokenizer(['Test question'] ).input_ids
snake_case__ : Any = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_attention_mask=__UpperCamelCase , ).input_ids
snake_case__ : Optional[int] = config.reader_seq_len
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = retriever(
__UpperCamelCase , __UpperCamelCase , answer_ids=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors='np' )
self.assertEqual([False, True, True] , __UpperCamelCase )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __UpperCamelCase )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __UpperCamelCase )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
# Test local path
snake_case__ : Union[str, Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
self.assertEqual(retriever.block_records[0] , B'This is the first record' )
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
snake_case__ : List[str] = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
snake_case__ : Optional[int] = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
self.assertEqual(retriever.block_records[0] , B'This is the first record' )
| 143
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : int = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
for i in range(config.num_hidden_layers ):
if base_model:
prefix = ''
else:
prefix = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
: config.hidden_size, :
]
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
-config.hidden_size :, :
]
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
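# Shape sketch (added illustration): timm stores the attention projections
# as one fused qkv matrix of shape (3 * hidden_size, hidden_size); the row
# slices taken above recover query, key and value weights in that order.
_hidden = 4
_fused = torch.arange(3 * _hidden * _hidden, dtype=torch.float32).reshape(3 * _hidden, _hidden)
_q, _k, _v = _fused[:_hidden], _fused[_hidden : 2 * _hidden], _fused[-_hidden:]
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)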
def rename_key( dct , old , new ):
val = dct.pop(old )
dct[new] = val
def prepare_img():
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_deit_checkpoint( deit_name , pytorch_dump_folder_path ):
config = DeiTConfig()
# all deit models have fine-tuned heads
base_model = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
config.num_labels = 1000
repo_id = 'huggingface/label-files'
filename = 'imagenet-1k-id2label.json'
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
config.patch_size = int(deit_name[-6:-4] )
config.image_size = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
config.hidden_size = 192
config.intermediate_size = 768
config.num_hidden_layers = 12
config.num_attention_heads = 3
elif deit_name[9:].startswith('small' ):
config.hidden_size = 384
config.intermediate_size = 1536
config.num_hidden_layers = 12
config.num_attention_heads = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
config.hidden_size = 1024
config.intermediate_size = 4096
config.num_hidden_layers = 24
config.num_attention_heads = 16
# load original model from timm
timm_model = timm.create_model(deit_name , pretrained=True )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
state_dict = timm_model.state_dict()
rename_keys = create_rename_keys(config , base_model )
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
read_in_q_k_v(state_dict , config , base_model )
# load HuggingFace model
model = DeiTForImageClassificationWithTeacher(config ).eval()
model.load_state_dict(state_dict )
# Check outputs on an image, prepared by DeiTImageProcessor
size = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
image_processor = DeiTImageProcessor(size=size , crop_size=config.image_size )
encoding = image_processor(images=prepare_img() , return_tensors='pt' )
pixel_values = encoding['pixel_values']
outputs = model(pixel_values )
timm_logits = timm_model(pixel_values )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(F"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(pytorch_dump_folder_path )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 143
| 1
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowerCAmelCase_ = {
"allenai/led-base-16384": 1_6_3_8_4,
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = LEDTokenizer
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int:
super().__init__(
lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, )
snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) )
snake_case = add_prefix_space
snake_case = pre_tok_class(**lowercase_ )
snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case = 'post_processor'
snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ )
if tokenizer_component_instance:
snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case = tuple(state['sep'] )
if "cls" in state:
snake_case = tuple(state['cls'] )
snake_case = False
if state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = add_prefix_space
snake_case = True
if state.get('trim_offsets', lowercase_ ) != trim_offsets:
snake_case = trim_offsets
snake_case = True
if changes_to_apply:
snake_case = getattr(lowercase_, state.pop('type' ) )
snake_case = component_class(**lowercase_ )
setattr(self.backend_tokenizer, lowercase_, lowercase_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self, value ) -> None:
        value = AddedToken(value, lstrip=True, rstrip=False ) if isinstance(value, str ) else value
        self._mask_token = value
    def _batch_encode_plus( self, *args, **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )
        return super()._batch_encode_plus(*args, **kwargs )
    def _encode_plus( self, *args, **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )
        return super()._encode_plus(*args, **kwargs )
    def save_vocabulary( self, save_directory, filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1=None ) -> List[int]:
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask'] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['global_attention_mask'] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return encoded_inputs
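# Minimal usage sketch (a hypothetical example; assumes the Hub checkpoint named
# above is reachable):
#   tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tokenizer("a very long document ...", return_tensors="pt")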
| 332
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
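# With this pattern, importing the package stays cheap: submodules listed in
# `_import_structure` are resolved by `_LazyModule` on first attribute access
# rather than eagerly at import time.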
| 332
| 1
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = (UniPCMultistepScheduler,)
lowerCAmelCase_ = (('''num_inference_steps''', 25),)
    def get_scheduler_config( self , **_A ):
        """simple docstring"""
        config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**_A )
return config
def UpperCAmelCase__ ( self : Any , _A : List[Any]=0 , **_A : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE : Dict = kwargs.pop('''num_inference_steps''' , _A )
__SCREAMING_SNAKE_CASE : Tuple = self.dummy_sample
__SCREAMING_SNAKE_CASE : int = 0.1 * sample
__SCREAMING_SNAKE_CASE : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
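        # UniPC is a multistep solver: each update conditions on a short history of
        # model outputs, so dummy past residuals are seeded into the scheduler below.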
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config(**_A )
__SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
__SCREAMING_SNAKE_CASE : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
__SCREAMING_SNAKE_CASE : Any = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
__SCREAMING_SNAKE_CASE : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = sample, sample
for t in range(_A , time_step + scheduler.config.solver_order + 1 ):
__SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(_A , _A , _A , **_A ).prev_sample
__SCREAMING_SNAKE_CASE : Tuple = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Optional[int] , _A : Tuple=0 , **_A : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE : int = kwargs.pop('''num_inference_steps''' , _A )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1 * sample
__SCREAMING_SNAKE_CASE : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
__SCREAMING_SNAKE_CASE : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
__SCREAMING_SNAKE_CASE : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
__SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_A , _A , _A , **_A ).prev_sample
__SCREAMING_SNAKE_CASE : Tuple = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : int , _A : List[Any]=None , **_A : Optional[Any] ):
"""simple docstring"""
if scheduler is None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE : List[str] = self.get_scheduler_config(**_A )
__SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**_A )
__SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config(**_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**_A )
__SCREAMING_SNAKE_CASE : str = 10
__SCREAMING_SNAKE_CASE : Any = self.dummy_model()
__SCREAMING_SNAKE_CASE : Any = self.dummy_sample_deter
scheduler.set_timesteps(_A )
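        # Standard denoising loop: the dummy model predicts a residual for each
        # timestep and the scheduler integrates it into the next sample.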
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE : Tuple = model(_A , _A )
__SCREAMING_SNAKE_CASE : str = scheduler.step(_A , _A , _A ).prev_sample
return sample
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('''num_inference_steps''' , _A )
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_A )
__SCREAMING_SNAKE_CASE : Any = self.dummy_sample
__SCREAMING_SNAKE_CASE : Any = 0.1 * sample
if num_inference_steps is not None and hasattr(_A , '''set_timesteps''' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A , '''set_timesteps''' ):
__SCREAMING_SNAKE_CASE : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__SCREAMING_SNAKE_CASE : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
__SCREAMING_SNAKE_CASE : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
__SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.timesteps[5]
__SCREAMING_SNAKE_CASE : Optional[int] = scheduler.timesteps[6]
__SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(_A , _A , _A , **_A ).prev_sample
__SCREAMING_SNAKE_CASE : int = scheduler.step(_A , _A , _A , **_A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = UniPCMultistepScheduler(**self.get_scheduler_config() )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.full_loop(scheduler=_A )
__SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.24_64 ) < 1e-3
__SCREAMING_SNAKE_CASE : str = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__SCREAMING_SNAKE_CASE : Any = DEISMultistepScheduler.from_config(scheduler.config )
__SCREAMING_SNAKE_CASE : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
__SCREAMING_SNAKE_CASE : Any = UniPCMultistepScheduler.from_config(scheduler.config )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.full_loop(scheduler=_A )
__SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.24_64 ) < 1e-3
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , solver_order=_A , solver_type=_A , )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A , solver_type=_A , prediction_type=_A , )
__SCREAMING_SNAKE_CASE : Tuple = self.full_loop(
solver_order=_A , solver_type=_A , prediction_type=_A , )
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A , time_step=0 )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = self.full_loop()
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.24_64 ) < 1e-3
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.full_loop(prediction_type='''v_prediction''' )
__SCREAMING_SNAKE_CASE : Dict = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.10_14 ) < 1e-3
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config(thresholding=_A , dynamic_thresholding_ratio=0 )
__SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_A )
__SCREAMING_SNAKE_CASE : int = 10
__SCREAMING_SNAKE_CASE : Dict = self.dummy_model()
__SCREAMING_SNAKE_CASE : Any = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE : Tuple = model(_A , _A )
__SCREAMING_SNAKE_CASE : str = scheduler.step(_A , _A , _A ).prev_sample
        assert sample.dtype == torch.float16
def UpperCAmelCase__ ( self : Any , **_A : Optional[Any] ):
"""simple docstring"""
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config(**_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 303
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class __UpperCamelCase ( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , unet , scheduler ):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size : int = 1 , num_inference_steps : int = 100 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , audio_length_in_s : Optional[float] = None , return_dict : bool = True , ):
        """simple docstring"""
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                F'''{audio_length_in_s} is too small. Make sure it\'s bigger than or equal to'''
                F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
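        # The UNet halves the sample length once per up/down block, so the working
        # length must be a multiple of 2 ** len(up_blocks); round up when needed and
        # trim back to the originally requested length after denoising.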
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
                F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
                ''' process.''' )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio , t ).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output , t , audio ).prev_sample
        audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
| 303
| 1
|
"""simple docstring"""
import re
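# Base pairing: A<->T and C<->G, so the complement of a strand is a simple
# per-character translation once the strand is validated to contain only A/T/C/G.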
def SCREAMING_SNAKE_CASE__ ( dna ) -> str:
    """Return the complement of a DNA strand."""
    if len(re.findall('''[ATCG]''', dna ) ) != len(dna ):
        raise ValueError('''Invalid Strand''' )
    return dna.translate(dna.maketrans('''ATCG''', '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363
|
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
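# Prim's algorithm: grow a minimum spanning tree from an arbitrary start vertex,
# repeatedly adding the cheapest edge that has exactly one endpoint inside the tree.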
class Graph:
    def __init__( self , vertices: set[int] , edges: Mapping[EdgeT, int] ):
        '''simple docstring'''
        self.vertices = vertices
        self.edges = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge( self , edge: EdgeT , weight: int ):
        '''simple docstring'''
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight
    def prims_algorithm( self ):
        '''simple docstring'''
        subgraph = Graph({min(self.vertices )} , {} )
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
def solution( filename: str = "p107_network.txt" ) -> int:
    """simple docstring"""
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    network_file = os.path.join(script_dir, filename )
    edges: dict[EdgeT, int] = {}
    with open(network_file ) as f:
        data = f.read().strip().split('''\n''' )
    adjacency_matrix = [line.split(''',''' ) for line in data]
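    # adjacency_matrix[i][j] holds the weight of the edge between vertices i and
    # j, or "-" when the two vertices are not connected.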
    for edge_a in range(1, len(adjacency_matrix ) ):
        for edge_b in range(edge_a ):
            if adjacency_matrix[edge_a][edge_b] != "-":
                edges[edge_b, edge_a] = int(adjacency_matrix[edge_a][edge_b] )
    graph = Graph(set(range(len(adjacency_matrix ) ) ), edges )
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values() )
    optimal_total = sum(subgraph.edges.values() )
    return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
| 330
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60
|
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def snake_case ( self : Optional[Any] ):
lowercase__ : str = tempfile.mkdtemp()
lowercase__ : Optional[Any] = 8
# DPR tok
lowercase__ : Dict = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase__ : List[Any] = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowercase__ : Optional[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : List[str] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : List[Any] = {"unk_token": "<unk>"}
lowercase__ : Any = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = os.path.join(SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
def snake_case ( self : Any ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def snake_case ( self : Any ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def snake_case ( self : Any ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def snake_case ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self : Optional[int] ):
lowercase__ : int = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
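        # An exact (Flat) inner-product index over the toy embeddings keeps
        # retrieval deterministic for the assertions below.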
return dataset
def snake_case ( self : List[str] ):
lowercase__ : Union[str, Any] = self.get_dummy_dataset()
lowercase__ : str = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Union[str, Any] = dataset
lowercase__ : List[str] = RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : bool ):
lowercase__ : Union[str, Any] = self.get_dummy_dataset()
lowercase__ : Optional[int] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
lowercase__ : Any = os.path.join(self.tmpdirname , "dataset" )
lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
lowercase__ : Tuple = RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowercase__ : Dict = RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE ) , )
return retriever
def snake_case ( self : Tuple ):
lowercase__ : Optional[int] = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
lowercase__ : Optional[int] = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
lowercase__ : List[str] = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(SCREAMING_SNAKE_CASE , open(SCREAMING_SNAKE_CASE , "wb" ) )
lowercase__ : int = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
lowercase__ : Any = RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def snake_case ( self : int ):
lowercase__ : Any = 1
lowercase__ : str = self.get_dummy_canonical_hf_index_retriever()
lowercase__ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
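        # Query 1 (all ones) scores highest against doc "1" (embedding 2*ones);
        # query 2 (all minus ones) scores highest against doc "0" (embedding ones).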
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def snake_case ( self : str ):
lowercase__ : Dict = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple = self.get_dummy_dataset()
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : List[str] = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def snake_case ( self : str ):
lowercase__ : Union[str, Any] = 1
lowercase__ : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : str = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : str = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[Any] = 1
lowercase__ : str = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ , lowercase__ , lowercase__ : Dict = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def snake_case ( self : List[str] ):
lowercase__ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : int = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : Dict = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[Any] = 1
lowercase__ : List[str] = self.get_dummy_legacy_index_retriever()
lowercase__ : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ , lowercase__ , lowercase__ : str = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def snake_case ( self : Dict ):
lowercase__ : Optional[int] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : str = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def snake_case ( self : Any ):
import torch
lowercase__ : List[Any] = 1
lowercase__ : Union[str, Any] = self.get_dummy_canonical_hf_index_retriever()
lowercase__ : Tuple = [[5, 7], [10, 11]]
lowercase__ : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : int = retriever(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ , lowercase__ : List[str] = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
lowercase__ : List[str] = retriever(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE , return_tensors="pt" , )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[Any] = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def snake_case ( self : int ):
lowercase__ : List[Any] = self.get_dpr_ctx_encoder_tokenizer()
lowercase__ : Optional[int] = 1
lowercase__ : str = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = [[5, 7], [10, 11]]
lowercase__ : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase__ : List[Any] = retriever(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(
            len(SCREAMING_SNAKE_CASE ) , 6 )  # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , SCREAMING_SNAKE_CASE ) # check for doc token related keys in dictionary.
| 130
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=DummyObject ):
    _backends = ["""transformers""", """torch""", """note_seq"""]
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
requires_backends(self ,['transformers', 'torch', 'note_seq'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> int:
'''simple docstring'''
requires_backends(cls ,['transformers', 'torch', 'note_seq'] )
@classmethod
def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['transformers', 'torch', 'note_seq'] )
| 367
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]]
lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase )
self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]]
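        # [1, 2] is a strict prefix of [1, 2, 3, 4]; DisjunctiveConstraint rejects
        # nested candidates because the shorter branch would always complete first.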
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(__UpperCamelCase ) # fails here
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase )
lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 )
lowercase_ : str = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 )
lowercase_ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 )
lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase )
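        # All three candidates share the prefix [1, 2]; the constraint completes as
        # soon as any single candidate sequence is fully matched.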
lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 321
| 0
|
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n        >>> from diffusers.utils import load_image\n        >>> import torch\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior.to(\"cuda\")\n\n        >>> prompt = \"A red cartoon frog, 4k\"\n        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n        ...     \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n        ... )\n        >>> pipe.to(\"cuda\")\n\n        >>> init_image = load_image(\n        ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n        ...     \"/kandinsky/frog.png\"\n        ... )\n\n        >>> image = pipe(\n        ...     image=init_image,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ...     strength=0.2,\n        ... ).images\n\n        >>> image[0].save(\"red_frog.png\")\n        ```\n"
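# Map a requested pixel size to the matching latent size: ceil-divide by
# scale_factor**2, then re-multiply by scale_factor (the movq spatial factor).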
def downscale_height_and_width(height: int , width: int , scale_factor: int = 8 ):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image , w: int = 512 , h: int = 512 ):
    """simple docstring"""
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert("""RGB""" ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
class KandinskyVaaImgaImgPipeline ( DiffusionPipeline ):
    def __init__(self , unet: UNetaDConditionModel , scheduler: DDPMScheduler , movq: VQModel , ) -> None:
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def get_timesteps(self , num_inference_steps , strength , device ):
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self , image , timestep , batch_size , num_images_per_prompt , dtype , device , generator=None ):
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}' )
        image = image.to(device=device , dtype=dtype )
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator , list ) and len(generator ) != batch_size:
                raise ValueError(
                    f'You have passed a list of generators of length {len(generator )}, but requested an effective batch'
                    f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
            elif isinstance(generator , list ):
                init_latents = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
                ]
                init_latents = torch.cat(init_latents , dim=0 )
            else:
                init_latents = self.movq.encode(image ).latent_dist.sample(generator )
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents] , dim=0 )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
def __snake_case (self, SCREAMING_SNAKE_CASE_=0 ) -> Optional[int]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
UpperCAmelCase_: Any = torch.device(f'cuda:{gpu_id}' )
UpperCAmelCase_: Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
def __snake_case (self, SCREAMING_SNAKE_CASE_=0 ) -> Tuple:
if is_accelerate_available() and is_accelerate_version(""">=""", """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
UpperCAmelCase_: str = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""", silence_dtype_warnings=_SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_: Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_: int = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, prev_module_hook=_SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
UpperCAmelCase_: List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __snake_case (self ) -> Dict:
if not hasattr(self.unet, """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE, """_hf_hook""" )
and hasattr(module._hf_hook, """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = 512, SCREAMING_SNAKE_CASE_ = 512, SCREAMING_SNAKE_CASE_ = 100, SCREAMING_SNAKE_CASE_ = 4.0, SCREAMING_SNAKE_CASE_ = 0.3, SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = "pil", SCREAMING_SNAKE_CASE_ = True, ) -> Tuple:
UpperCAmelCase_: Any = self._execution_device
UpperCAmelCase_: Union[str, Any] = guidance_scale > 1.0
if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_: int = torch.cat(_SCREAMING_SNAKE_CASE, dim=0 )
UpperCAmelCase_: Any = image_embeds.shape[0]
if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_: str = torch.cat(_SCREAMING_SNAKE_CASE, dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_: int = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE, dim=0 )
UpperCAmelCase_: List[Any] = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE, dim=0 )
UpperCAmelCase_: Optional[int] = torch.cat([negative_image_embeds, image_embeds], dim=0 ).to(dtype=self.unet.dtype, device=_SCREAMING_SNAKE_CASE )
if not isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_: Tuple = [image]
if not all(isinstance(_SCREAMING_SNAKE_CASE, (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f'Input is in incorrect format: {[type(_SCREAMING_SNAKE_CASE ) for i in image]}. Currently, we only support PIL image and pytorch tensor' )
UpperCAmelCase_: Optional[Any] = torch.cat([prepare_image(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) for i in image], dim=0 )
UpperCAmelCase_: Any = image.to(dtype=image_embeds.dtype, device=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_: int = self.movq.encode(_SCREAMING_SNAKE_CASE )["""latents"""]
UpperCAmelCase_: Tuple = latents.repeat_interleave(_SCREAMING_SNAKE_CASE, dim=0 )
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE, device=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_: Dict = self.get_timesteps(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
UpperCAmelCase_: str = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase_: int = downscale_height_and_width(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, self.movq_scale_factor )
UpperCAmelCase_: str = self.prepare_latents(
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, image_embeds.dtype, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_: str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_: Union[str, Any] = {"""image_embeds""": image_embeds}
UpperCAmelCase_: Union[str, Any] = self.unet(
sample=_SCREAMING_SNAKE_CASE, timestep=_SCREAMING_SNAKE_CASE, encoder_hidden_states=_SCREAMING_SNAKE_CASE, added_cond_kwargs=_SCREAMING_SNAKE_CASE, return_dict=_SCREAMING_SNAKE_CASE, )[0]
if do_classifier_free_guidance:
UpperCAmelCase_: Dict = noise_pred.split(latents.shape[1], dim=1 )
UpperCAmelCase_: Union[str, Any] = noise_pred.chunk(2 )
UpperCAmelCase_: Any = variance_pred.chunk(2 )
UpperCAmelCase_: Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_: Optional[int] = torch.cat([noise_pred, variance_pred_text], dim=1 )
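                # The UNet predicts noise and (learned) variance jointly; guidance mixes
                # only the noise halves, then the text-conditioned variance half is re-attached.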
if not (
hasattr(self.scheduler.config, """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_: int = noise_pred.split(latents.shape[1], dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_: List[Any] = self.scheduler.step(
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, generator=_SCREAMING_SNAKE_CASE, )[0]
# post-processing
UpperCAmelCase_: Optional[Any] = self.movq.decode(_SCREAMING_SNAKE_CASE, force_not_quantize=_SCREAMING_SNAKE_CASE )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
UpperCAmelCase_: Union[str, Any] = image * 0.5 + 0.5
UpperCAmelCase_: Any = image.clamp(0, 1 )
UpperCAmelCase_: Optional[int] = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_: Tuple = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
| 147
|
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers( max_number : int ):
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
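# A hybrid-integer p^q * q^p (p, q distinct primes) satisfies
# p^q * q^p <= base^degree  <=>  q*log2(p) + p*log2(q) <= degree*log2(base),
# which the two-pointer scan below checks entirely in log space.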
def solution( base : int = 800800 , degree : int = 800800 ):
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 109
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = SamImageProcessor()
UpperCamelCase__ = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def lowercase_ ( self : Tuple, **a_ : int ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname, **__lowerCAmelCase ).image_processor
def lowercase_ ( self : str ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
UpperCamelCase__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase, 0, -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = self.get_image_processor(do_normalize=__lowerCAmelCase, padding_value=1.0 )
UpperCamelCase__ = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=__lowerCAmelCase, padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, __lowerCAmelCase )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = SamProcessor(image_processor=__lowerCAmelCase )
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = image_processor(__lowerCAmelCase, return_tensors="np" )
UpperCamelCase__ = processor(images=__lowerCAmelCase, return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes" )  # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
@require_torch
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = SamProcessor(image_processor=__lowerCAmelCase )
UpperCamelCase__ = [torch.ones((1, 3, 5, 5) )]
UpperCamelCase__ = [[1764, 2646]]
UpperCamelCase__ = [[683, 1024]]
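        # post_process_masks upsamples the low-res (5x5) dummy masks back to the
        # original image size, here 1764 x 2646.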
UpperCamelCase__ = processor.post_process_masks(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
UpperCamelCase__ = processor.post_process_masks(
__lowerCAmelCase, torch.tensor(__lowerCAmelCase ), torch.tensor(__lowerCAmelCase ) )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
# should also work with np
UpperCamelCase__ = [np.ones((1, 3, 5, 5) )]
UpperCamelCase__ = processor.post_process_masks(__lowerCAmelCase, np.array(__lowerCAmelCase ), np.array(__lowerCAmelCase ) )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
UpperCamelCase__ = [[1, 0], [0, 1]]
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = processor.post_process_masks(__lowerCAmelCase, np.array(__lowerCAmelCase ), np.array(__lowerCAmelCase ) )
@require_vision
@require_tf
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = SamImageProcessor()
UpperCamelCase__ = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname, **__lowerCAmelCase ).image_processor
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
UpperCamelCase__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase, 0, -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = self.get_image_processor(do_normalize=__lowerCAmelCase, padding_value=1.0 )
UpperCamelCase__ = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=__lowerCAmelCase, padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, __lowerCAmelCase )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = SamProcessor(image_processor=__lowerCAmelCase )
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = image_processor(__lowerCAmelCase, return_tensors="np" )
UpperCamelCase__ = processor(images=__lowerCAmelCase, return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
@require_tf
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = SamProcessor(image_processor=__lowerCAmelCase )
UpperCamelCase__ = [tf.ones((1, 3, 5, 5) )]
UpperCamelCase__ = [[1764, 2646]]
UpperCamelCase__ = [[683, 1024]]
UpperCamelCase__ = processor.post_process_masks(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, return_tensors="tf" )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
UpperCamelCase__ = processor.post_process_masks(
__lowerCAmelCase, tf.convert_to_tensor(__lowerCAmelCase ), tf.convert_to_tensor(__lowerCAmelCase ), return_tensors="tf", )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
# should also work with np
UpperCamelCase__ = [np.ones((1, 3, 5, 5) )]
UpperCamelCase__ = processor.post_process_masks(
__lowerCAmelCase, np.array(__lowerCAmelCase ), np.array(__lowerCAmelCase ), return_tensors="tf" )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
UpperCamelCase__ = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
UpperCamelCase__ = processor.post_process_masks(
__lowerCAmelCase, np.array(__lowerCAmelCase ), np.array(__lowerCAmelCase ), return_tensors="tf" )
@require_vision
@require_torchvision
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = SamImageProcessor()
UpperCamelCase__ = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname, **a_ ).image_processor
def lowercase_ ( self : Any ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
        UpperCamelCase__ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8 )]
UpperCamelCase__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase, 0, -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = SamProcessor(image_processor=__lowerCAmelCase )
        UpperCamelCase__ = np.random.randint(0, 2, size=(1, 3, 5, 5) ).astype(np.float32 )
UpperCamelCase__ = [tf.convert_to_tensor(__lowerCAmelCase )]
UpperCamelCase__ = [torch.tensor(__lowerCAmelCase )]
UpperCamelCase__ = [[1764, 2646]]
UpperCamelCase__ = [[683, 1024]]
UpperCamelCase__ = processor.post_process_masks(
__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, return_tensors="tf" )
UpperCamelCase__ = processor.post_process_masks(
__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, return_tensors="pt" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = SamProcessor(image_processor=__lowerCAmelCase )
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = image_processor(__lowerCAmelCase, return_tensors="pt" )["pixel_values"].numpy()
UpperCamelCase__ = processor(images=__lowerCAmelCase, return_tensors="pt" )["pixel_values"].numpy()
UpperCamelCase__ = image_processor(__lowerCAmelCase, return_tensors="tf" )["pixel_values"].numpy()
UpperCamelCase__ = processor(images=__lowerCAmelCase, return_tensors="tf" )["pixel_values"].numpy()
self.assertTrue(np.allclose(__lowerCAmelCase, __lowerCAmelCase ) )
self.assertTrue(np.allclose(__lowerCAmelCase, __lowerCAmelCase ) )
self.assertTrue(np.allclose(__lowerCAmelCase, __lowerCAmelCase ) )
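# Note on the shapes above (illustration, not part of the test file): SAM
# predicts masks at a fixed low resolution; post_process_masks first removes
# the padding implied by each reshaped_input_sizes entry (e.g. 683x1024) and
# then resizes the mask back to its original_sizes entry (e.g. 1764x2646),
# which is why every framework variant is expected to return (1, 3, 1764, 2646).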
"""One of the several implementations of the Lempel-Ziv-Welch decompression algorithm."""
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
            return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompresses the given bit string and returns the decompressed bit string."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # the lexicon size hit a power of two: every existing key gains a leading "0"
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given bit string (only 0's and 1's) as bytes to the given file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Removes the size prefix that the compressed file carries and returns the rest."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, decompresses it and writes the result to destination."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
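# Illustration (not part of the original script): the power-of-two check in
# decompress_data above rebuilds the lexicon so that every existing key gains
# a leading "0", keeping code words one bit wider once the index outgrows the
# current code width.
# >>> lexicon = {"0": "a", "1": "b"}
# >>> {"0" + key: value for key, value in lexicon.items()}
# {'00': 'a', '01': 'b'}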
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Creates a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1 - beta) over time: each beta is beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at max_beta.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    Scheduler with Heun steps for discrete beta schedules (Algorithm 2 of Karras et al., 2022).
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas, num_inference_steps):
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
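# Hedged usage sketch (illustration only; the toy loop, shapes and zero model
# output are assumptions, not part of the original file). Note that
# set_timesteps repeats interior timesteps because Heun's method evaluates the
# model at both ends of each interval and averages the two slopes.
# scheduler = HeunDiscreteScheduler()
# scheduler.set_timesteps(num_inference_steps=10)
# sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     model_input = scheduler.scale_model_input(sample, t)
#     model_output = torch.zeros_like(model_input)  # stand-in for a denoising model
#     sample = scheduler.step(model_output, t, sample).prev_sample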
def combination_util(arr, n, r, index, data, i) -> None:
    # Current combination is complete, print it
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r) -> None:
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
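# For reference (illustration, not part of the original snippet): the standard
# library produces the same combinations lazily.
# >>> from itertools import combinations
# >>> list(combinations([10, 20, 30], 2))
# [(10, 20), (10, 30), (20, 30)]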
"""Image/Text processor class for CLIP."""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
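# Hedged usage sketch (the checkpoint id is an assumption; any CLIP checkpoint
# with both a tokenizer and an image processor works the same way):
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# inputs then holds input_ids, attention_mask and pixel_values in one BatchEncoding.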
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class a__( lowerCamelCase__ , unittest.TestCase ):
lowercase__ = BertJapaneseTokenizer
lowercase__ = False
lowercase__ = True
def lowercase_ ( self : int ):
super().setUp()
a : List[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase_ ( self : Any , __snake_case : str ):
a : Union[str, Any] = 'こんにちは、世界。 \nこんばんは、世界。'
a : List[Any] = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def lowercase_ ( self : Optional[Any] , __snake_case : Optional[Any] ):
a , a : List[str] = self.get_input_output_texts(__snake_case )
a : Optional[int] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
a : str = tokenizer.decode(__snake_case , clean_up_tokenization_spaces=__snake_case )
return text, ids
def lowercase_ ( self : Optional[Any] ):
pass # TODO add if relevant
def lowercase_ ( self : List[Any] ):
pass # TODO add if relevant
def lowercase_ ( self : Dict ):
pass # TODO add if relevant
def lowercase_ ( self : List[Any] ):
a : Optional[int] = self.tokenizer_class(self.vocab_file )
a : Optional[int] = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def lowercase_ ( self : Union[str, Any] ):
a : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(__snake_case )
a : List[str] = 'こんにちは、世界。\nこんばんは、世界。'
a : Tuple = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : Optional[int] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(__snake_case , 'wb' ) as handle:
pickle.dump(__snake_case , __snake_case )
with open(__snake_case , 'rb' ) as handle:
a : Optional[Any] = pickle.load(__snake_case )
a : Tuple = tokenizer_new.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def lowercase_ ( self : Dict ):
a : List[str] = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : List[Any] ):
try:
a : int = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : Any ):
try:
a : Union[str, Any] = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : str ):
a : Tuple = MecabTokenizer(do_lower_case=__snake_case , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : Union[str, Any] ):
try:
a : Any = MecabTokenizer(
do_lower_case=__snake_case , normalize_text=__snake_case , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def lowercase_ ( self : List[Any] ):
a : Dict = MecabTokenizer(normalize_text=__snake_case , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def lowercase_ ( self : str ):
a : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(__snake_case )
a : List[Any] = 'こんにちは、世界。\nこんばんは、世界。'
a : int = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : Tuple = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(__snake_case , 'wb' ) as handle:
pickle.dump(__snake_case , __snake_case )
with open(__snake_case , 'rb' ) as handle:
a : Optional[int] = pickle.load(__snake_case )
a : List[Any] = tokenizer_new.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@require_sudachi
def lowercase_ ( self : List[Any] ):
a : Optional[Any] = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self : Any ):
a : str = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def lowercase_ ( self : Optional[Any] ):
a : Optional[int] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def lowercase_ ( self : Optional[Any] ):
a : Dict = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def lowercase_ ( self : Dict ):
a : Optional[int] = SudachiTokenizer(do_lower_case=__snake_case , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self : Tuple ):
a : int = SudachiTokenizer(normalize_text=__snake_case , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self : Union[str, Any] ):
a : List[str] = SudachiTokenizer(trim_whitespace=__snake_case , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def lowercase_ ( self : List[Any] ):
a : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(__snake_case )
a : str = 'こんにちは、世界。\nこんばんは、世界。'
a : Tuple = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : Optional[Any] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(__snake_case , 'wb' ) as handle:
pickle.dump(__snake_case , __snake_case )
with open(__snake_case , 'rb' ) as handle:
a : List[str] = pickle.load(__snake_case )
a : Any = tokenizer_new.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@require_jumanpp
def lowercase_ ( self : List[str] ):
a : Any = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self : List[str] ):
a : List[Any] = JumanppTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self : Any ):
a : List[Any] = JumanppTokenizer(normalize_text=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self : Any ):
a : str = JumanppTokenizer(trim_whitespace=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def lowercase_ ( self : Tuple ):
a : int = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def lowercase_ ( self : Any ):
a : int = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
a : Optional[int] = {}
for i, token in enumerate(__snake_case ):
a : Dict = i
a : Optional[Any] = WordpieceTokenizer(vocab=__snake_case , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def lowercase_ ( self : Tuple ):
a : List[Any] = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
a : List[Any] = tokenizer.subword_tokenizer
a : List[str] = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(__snake_case , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
a : Union[str, Any] = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(__snake_case , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def lowercase_ ( self : Union[str, Any] ):
a : Optional[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
a : Dict = tokenizer.encode('ありがとう。' , add_special_tokens=__snake_case )
a : str = tokenizer.encode('どういたしまして。' , add_special_tokens=__snake_case )
a : Optional[int] = tokenizer.build_inputs_with_special_tokens(__snake_case )
a : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class a__( lowerCamelCase__ , unittest.TestCase ):
lowercase__ = BertJapaneseTokenizer
lowercase__ = False
def lowercase_ ( self : List[Any] ):
super().setUp()
a : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase_ ( self : Optional[Any] , **__snake_case : List[Any] ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **__snake_case )
def lowercase_ ( self : Tuple , __snake_case : List[str] ):
a : int = 'こんにちは、世界。 \nこんばんは、世界。'
a : Optional[Any] = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def lowercase_ ( self : str ):
pass # TODO add if relevant
def lowercase_ ( self : List[str] ):
pass # TODO add if relevant
def lowercase_ ( self : Any ):
pass # TODO add if relevant
def lowercase_ ( self : Any ):
a : Optional[int] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
a : Tuple = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
__snake_case , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def lowercase_ ( self : Any ):
a : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
a : Optional[Any] = {}
for i, token in enumerate(__snake_case ):
a : Tuple = i
a : Optional[int] = CharacterTokenizer(vocab=__snake_case , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def lowercase_ ( self : Tuple ):
a : List[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
a : Optional[int] = tokenizer.encode('ありがとう。' , add_special_tokens=__snake_case )
a : List[str] = tokenizer.encode('どういたしまして。' , add_special_tokens=__snake_case )
a : Optional[int] = tokenizer.build_inputs_with_special_tokens(__snake_case )
a : Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class a__( unittest.TestCase ):
def lowercase_ ( self : List[str] ):
a : List[Any] = 'cl-tohoku/bert-base-japanese'
a : Dict = AutoTokenizer.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
class a__( unittest.TestCase ):
def lowercase_ ( self : Union[str, Any] ):
a : List[str] = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(__snake_case )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
a : Dict = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(__snake_case )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
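# Context for the assertLogs checks above (illustration): loading a Japanese
# BERT checkpoint with plain BertTokenizer, or an English checkpoint with
# BertJapaneseTokenizer, still returns a tokenizer, but transformers logs a
# warning that the checkpoint was saved with a different tokenizer class.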
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=[0.5, 0.5, 0.5] , a=[0.5, 0.5, 0.5] , a=True , a=1 / 2_5_5 , a=True , ) -> Any:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowercase__ : Dict = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
lowercase__ : Optional[int] = parent
lowercase__ : Tuple = batch_size
lowercase__ : List[str] = num_channels
lowercase__ : List[str] = min_resolution
lowercase__ : Tuple = max_resolution
lowercase__ : Union[str, Any] = do_resize
lowercase__ : Dict = size
lowercase__ : str = do_normalize
lowercase__ : List[Any] = image_mean
lowercase__ : int = image_std
lowercase__ : List[Any] = do_rescale
lowercase__ : int = rescale_factor
lowercase__ : int = do_pad
def _UpperCAmelCase ( self ) -> Optional[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _UpperCAmelCase ( self , a , a=False ) -> Dict:
if not batched:
lowercase__ : str = image_inputs[0]
if isinstance(a , Image.Image ):
lowercase__ , lowercase__ : List[str] = image.size
else:
lowercase__ , lowercase__ : int = image.shape[1], image.shape[2]
if w < h:
lowercase__ : Any = int(self.size['shortest_edge'] * h / w )
lowercase__ : Dict = self.size['shortest_edge']
elif w > h:
lowercase__ : int = self.size['shortest_edge']
lowercase__ : Tuple = int(self.size['shortest_edge'] * w / h )
else:
lowercase__ : Optional[Any] = self.size['shortest_edge']
lowercase__ : List[Any] = self.size['shortest_edge']
else:
lowercase__ : Union[str, Any] = []
for image in image_inputs:
lowercase__ , lowercase__ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase__ : Optional[Any] = max(a , key=lambda a : item[0] )[0]
lowercase__ : Optional[Any] = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
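# Worked example of get_expected_values (illustration): with
# size={"shortest_edge": 18} and a single 30x400 (height x width) input,
# w > h maps the height to 18 and the width to int(18 * 400 / 30) = 240;
# batched inputs take the per-image maxima of those heights and widths.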
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : List[str] = DetaImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[Any] = DetaImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'image_mean' ) )
self.assertTrue(hasattr(a , 'image_std' ) )
self.assertTrue(hasattr(a , 'do_normalize' ) )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'do_rescale' ) )
self.assertTrue(hasattr(a , 'do_pad' ) )
self.assertTrue(hasattr(a , 'size' ) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
def _UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ : Optional[int] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ , lowercase__ : Tuple = self.image_processor_tester.get_expected_values(a , batched=a )
lowercase__ : List[str] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : Union[str, Any] = image_processing(a , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ : int = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : Optional[int] = image_processing(a , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _UpperCAmelCase ( self ) -> Dict:
# prepare image and target
lowercase__ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowercase__ : Tuple = json.loads(f.read() )
lowercase__ : Optional[int] = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
lowercase__ : Union[str, Any] = DetaImageProcessor()
lowercase__ : List[str] = image_processing(images=a , annotations=a , return_tensors='pt' )
# verify pixel values
lowercase__ : int = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , a )
lowercase__ : List[str] = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , a , atol=1e-4 ) )
# verify area
lowercase__ : List[str] = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , a ) )
# verify boxes
lowercase__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , a )
lowercase__ : str = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , a , atol=1e-3 ) )
# verify image_id
lowercase__ : Tuple = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , a ) )
# verify is_crowd
lowercase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , a ) )
# verify class_labels
lowercase__ : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , a ) )
# verify orig_size
lowercase__ : Tuple = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , a ) )
# verify size
lowercase__ : Optional[int] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , a ) )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
# prepare image, target and masks_path
lowercase__ : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowercase__ : int = json.loads(f.read() )
lowercase__ : Optional[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
lowercase__ : Any = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowercase__ : List[Any] = DetaImageProcessor(format='coco_panoptic' )
lowercase__ : int = image_processing(images=a , annotations=a , masks_path=a , return_tensors='pt' )
# verify pixel values
lowercase__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , a )
lowercase__ : Optional[int] = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , a , atol=1e-4 ) )
# verify area
lowercase__ : List[str] = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , a ) )
# verify boxes
lowercase__ : int = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , a )
lowercase__ : List[Any] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , a , atol=1e-3 ) )
# verify image_id
lowercase__ : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , a ) )
# verify is_crowd
lowercase__ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , a ) )
# verify class_labels
lowercase__ : Tuple = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , a ) )
# verify masks
lowercase__ : Dict = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , a )
# verify orig_size
lowercase__ : Any = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , a ) )
# verify size
lowercase__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , a ) )
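# Note (illustration): the two slow tests differ mainly in annotation format.
# The detection test reads box annotations from coco_annotations.txt, while
# the panoptic test additionally resolves per-segment PNG masks through
# masks_path, which is why only the panoptic encoding verifies a masks sum.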
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    """Configuration class to store the configuration of a data2vec text model."""

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
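# Illustration (not part of the original module): the `inputs` mapping above
# tells the ONNX exporter which axes are dynamic, for example
# OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#              ("attention_mask", {0: "batch", 1: "sequence"})])
# so one exported graph can accept any batch size and sequence length.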
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Performs max pooling on the given square 2D matrix (image)."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Performs average pooling on the given square 2D matrix (image)."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
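# Worked example (illustration): a 4x4 matrix with size=2 and stride=2 yields
# a ((4 - 2) // 2 + 1) = 2x2 output, one value per non-overlapping window.
# >>> maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
# array([[ 6.,  8.],
#        [14., 16.]])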
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
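# Worked example (illustration): for a 480x640 input with output_size=384,
# keep_aspect_ratio=True and multiple=32, the scale closer to 1 wins
# (384 / 480 = 0.8 rather than 384 / 640 = 0.6), giving 384x512; both are
# already multiples of 32, so constraint_to_multiple_of leaves them unchanged.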
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Dict = ['pixel_values']
def __init__( self : Any , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = False , a : int = 1 , a : bool = True , a : Union[int, float] = 1 / 255 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : Tuple , )-> None:
"""simple docstring"""
super().__init__(**a )
lowercase__ = size if size is not None else {'height': 384, 'width': 384}
lowercase__ = get_size_dict(a )
lowercase__ = do_resize
lowercase__ = size
lowercase__ = keep_aspect_ratio
lowercase__ = ensure_multiple_of
lowercase__ = resample
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = do_normalize
lowercase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
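For concreteness, a worked example of the aspect-preserving resize above (illustrative numbers, calling the module-level helper directly; get_image_size is assumed to infer the channel dimension of an HWC array):

import numpy as np

image = np.zeros((480, 640, 3), dtype=np.uint8)  # height x width x channels
print(get_resize_output_image_size(image, output_size=(384, 384), keep_aspect_ratio=True, multiple=32))
# -> (384, 512): the height scale 0.8 is closer to 1 than the width scale 0.6,
#    so both sides are scaled by 0.8 and snapped to a multiple of 32.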
| 269
| 1
|
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
TGT = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=['rouge2', 'rougeL'])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=['rouge2'])
    assert (
        pd.DataFrame(no_aggregation['rouge2']).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2['rouge2']).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = 'rougeLsum'
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ['rouge1', 'rouge2', 'rougeL']
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    hypotheses = [
        '''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
        '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
    ]
    references = [
        '''Margot Frank, died in 1945, a month earlier than previously thought.''',
        '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
        ''' the final seconds on board Flight 9525.''',
    ]
    assert calculate_rouge(hypotheses, references, newline_sep=True) == calculate_rouge(
        hypotheses, references, newline_sep=False
    )


def test_pegasus_newline():
    pred = [
        '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
    ]
    tgt = [
        ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=['rougeLsum'], newline_sep=False)['rougeLsum']
    new_score = calculate_rouge(pred, tgt, rouge_keys=['rougeLsum'])['rougeLsum']
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path('examples/seq2seq/test_data/wmt_en_ro')
    metrics = calculate_rouge_path(data_dir.joinpath('test.source'), data_dir.joinpath('test.target'))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath('test.source'), data_dir.joinpath('test.target'), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
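These tests exercise the local calculate_rouge wrapper; the underlying rougeLsum/newline behavior comes from Google's rouge_score package. A minimal sketch using that package directly (assumed installed; this is not the wrapper's API):

from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rougeLsum"], use_stemmer=True)
pred = "the cat sat .\nthe dog ran ."   # rougeLsum treats each line as a sentence
tgt = "the cat sat .\nthe dog barked ."
print(scorer.score(tgt, pred)["rougeLsum"].fmeasure)
# Joining the sentences onto one line (dropping the "\n") changes the
# summary-level LCS, which is what calculate_rouge's newline_sep flag toggles.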
| 106
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__UpperCamelCase : str = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
__UpperCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
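For readers unfamiliar with _LazyModule: the same deferred-import effect can be sketched with PEP 562's module-level __getattr__. This is an illustrative toy, not transformers' implementation:

import importlib

_lazy_map = {"json": ["dumps", "loads"]}  # submodule -> exported names

def __getattr__(name):  # PEP 562: called only for attributes not found normally
    for module_name, symbols in _lazy_map.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")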
| 106
| 1
|
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowerCAmelCase_ : int = TypeVar('''T''')
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
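As a follow-up, deque.remove makes refer O(n); the same eviction policy can be sketched in O(1) per operation with collections.OrderedDict:

from collections import OrderedDict

class OrderedDictLRU:
    """Same policy as above, but O(1) per refer() via OrderedDict."""

    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            del self.store[key]                  # re-inserting bumps recency
        elif len(self.store) == self.capacity:
            self.store.popitem(last=True)        # evict least-recently used
        self.store[key] = None
        self.store.move_to_end(key, last=False)  # most recent kept at the left

cache = OrderedDictLRU(4)
for key in ["A", 2, 3, "A", 4, 5]:
    cache.refer(key)
print(list(cache.store))  # [5, 4, 'A', 3], matching the deque version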
| 366
|
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase_ : Tuple = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
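To see the shape-checked copy in isolation, here is a toy use of set_param on a small nn.Linear (random values, not real Reformer weights):

import numpy as np
import torch
from torch import nn

layer = nn.Linear(4, 2)
weight = np.random.randn(2, 4).astype(np.float32)
bias = np.zeros(2, dtype=np.float32)
set_param(layer, torch.tensor(weight), torch.tensor(bias))  # asserts shapes, then copies
print(layer.weight.shape)  # torch.Size([2, 4])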
| 170
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__SCREAMING_SNAKE_CASE :Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
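The converter can also be driven from Python rather than the CLI; the paths below are hypothetical placeholders, shown only to illustrate the call signature:

convert_tf_checkpoint_to_pytorch(
    "checkpoints/mobilebert/mobilebert.ckpt",  # TF checkpoint prefix (assumed path)
    "checkpoints/mobilebert/config.json",      # model config file (assumed path)
    "out/pytorch_model.bin",                   # output file (assumed path)
)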
| 22
|
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
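Since item order matters in this counting problem, a short worked trace of the bottom-up table makes the recurrence concrete:

# Worked trace for target=5, array=[1, 2, 5]:
#   dp[0] = 1
#   dp[1] = dp[0]                 = 1
#   dp[2] = dp[1] + dp[0]         = 2
#   dp[3] = dp[2] + dp[1]         = 3
#   dp[4] = dp[3] + dp[2]         = 5
#   dp[5] = dp[4] + dp[3] + dp[0] = 9
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9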
| 242
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase: int = logging.get_logger(__name__)
lowerCAmelCase: List[Any] = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class a__( lowerCamelCase__ , lowerCamelCase__ ):
lowercase__ = """focalnet"""
def __init__( self : Tuple , __snake_case : str=2_24 , __snake_case : Dict=4 , __snake_case : str=3 , __snake_case : Union[str, Any]=96 , __snake_case : List[str]=False , __snake_case : int=[1_92, 3_84, 7_68, 7_68] , __snake_case : List[str]=[2, 2, 6, 2] , __snake_case : int=[2, 2, 2, 2] , __snake_case : Tuple=[3, 3, 3, 3] , __snake_case : List[str]="gelu" , __snake_case : List[Any]=4.0 , __snake_case : Optional[Any]=0.0 , __snake_case : Optional[int]=0.1 , __snake_case : List[Any]=False , __snake_case : Any=1e-4 , __snake_case : Optional[Any]=False , __snake_case : int=False , __snake_case : Dict=False , __snake_case : str=0.02 , __snake_case : int=1e-5 , __snake_case : Optional[Any]=32 , __snake_case : Dict=None , __snake_case : Dict=None , **__snake_case : List[str] , ):
super().__init__(**__snake_case )
a : Optional[int] = image_size
a : Union[str, Any] = patch_size
a : int = num_channels
a : List[str] = embed_dim
a : Union[str, Any] = use_conv_embed
a : Any = hidden_sizes
a : List[Any] = depths
a : Any = focal_levels
a : Any = focal_windows
a : Optional[int] = hidden_act
a : List[Any] = mlp_ratio
a : Optional[Any] = hidden_dropout_prob
a : Dict = drop_path_rate
a : Optional[int] = use_layerscale
a : Tuple = layerscale_value
a : Optional[Any] = use_post_layernorm
a : List[str] = use_post_layernorm_in_modulation
a : Optional[int] = normalize_modulator
a : Union[str, Any] = initializer_range
a : Optional[Any] = layer_norm_eps
a : Optional[Any] = encoder_stride
a : str = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
a , a : Optional[int] = get_aligned_output_features_output_indices(
out_features=__snake_case , out_indices=__snake_case , stage_names=self.stage_names )
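Assuming this row corresponds to transformers' FocalNetConfig (the class names here are obfuscated), a quick sketch of instantiating it:

from transformers import FocalNetConfig

config = FocalNetConfig(embed_dim=96, depths=[2, 2, 6, 2], out_features=["stage1", "stage4"])
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']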
| 96
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
lowerCAmelCase: List[str] = 'examples/'
lowerCAmelCase: List[Any] = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
lowerCAmelCase: str = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
lowerCAmelCase: str = 'README.md'
def lowerCamelCase__ ( _A , _A , _A ):
with open(_A , 'r' , encoding='utf-8' , newline='\n' ) as f:
a : Tuple = f.read()
a , a : Tuple = REPLACE_PATTERNS[pattern]
a : Dict = replace.replace('VERSION' , _A )
a : Dict = re_pattern.sub(_A , _A )
with open(_A , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(_A )
def lowerCamelCase__ ( _A ):
for folder, directories, fnames in os.walk(_A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(_A , _A ) , _A , pattern='examples' )
def lowerCamelCase__ ( _A , _A=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_A , _A , _A )
if not patch:
update_version_in_examples(_A )
def lowerCamelCase__ ( ):
a : Tuple = '🤗 Transformers currently provides the following architectures'
a : Any = '1. Want to contribute a new model?'
with open(_A , 'r' , encoding='utf-8' , newline='\n' ) as f:
a : Tuple = f.readlines()
# Find the start of the list.
a : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
a : Optional[int] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
a : List[Any] = lines[index].replace(
'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
index += 1
with open(_A , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(_A )
def lowerCamelCase__ ( ):
with open(REPLACE_FILES['init'] , 'r' ) as f:
a : Union[str, Any] = f.read()
a : Tuple = REPLACE_PATTERNS['init'][0].search(_A ).groups()[0]
return packaging.version.parse(_A )
def lowerCamelCase__ ( _A=False ):
a : int = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
a : Any = default_version.base_version
elif patch:
a : Dict = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
a : Union[str, Any] = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
a : List[Any] = input(f"""Which version are you releasing? [{default_version}]""" )
if len(_A ) == 0:
a : Union[str, Any] = default_version
print(f"""Updating version to {version}.""" )
global_version_update(_A , patch=_A )
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
def lowerCamelCase__ ( ):
a : int = get_version()
a : Any = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
a : int = current_version.base_version
# Check with the user we got that right.
a : Tuple = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(_A ) == 0:
a : Optional[int] = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(_A )
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCAmelCase: Tuple = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
lowerCAmelCase: Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
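A minimal sketch of the pattern-based version bump used above, run on an in-memory string instead of a file (illustrative version numbers):

import re

code = '__version__ = "4.30.0.dev0"\n'
re_pattern, replace = (
    re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE),
    '__version__ = "VERSION"\n',
)
replace = replace.replace("VERSION", "4.30.0")
print(re_pattern.sub(replace, code))  # __version__ = "4.30.0"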
| 96
| 1
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCAmelCase : Tuple = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _A( datasets.BuilderConfig ):
"""simple docstring"""
UpperCamelCase : Optional[datasets.Features] = None
def _SCREAMING_SNAKE_CASE ( a , a , ) -> Union[str, Any]:
import pyspark
def generate_fn():
__A : str = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
__A : int = df_with_partition_id.select('*' ).where(F"""part_id = {partition_id}""" ).drop('part_id' )
__A : Optional[Any] = partition_df.collect()
__A : Optional[Any] = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class _A( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self , _A , _A=None , ):
__A : Union[str, Any] = df
__A : Any = partition_order or range(self.df.rdd.getNumPartitions() )
__A : Union[str, Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ):
yield from self.generate_examples_fn()
def UpperCAmelCase_ ( self , _A ):
__A : Optional[int] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(_A )
return SparkExamplesIterable(self.df , partition_order=_A )
def UpperCAmelCase_ ( self , _A , _A ):
__A : Optional[int] = self.split_shard_indices_by_worker(_A , _A )
return SparkExamplesIterable(self.df , partition_order=_A )
@property
def UpperCAmelCase_ ( self ):
return len(self.partition_order )
class _A( datasets.DatasetBuilder ):
"""simple docstring"""
UpperCamelCase : List[Any] = SparkConfig
def __init__( self , _A , _A = None , _A = None , **_A , ):
import pyspark
__A : Any = pyspark.sql.SparkSession.builder.getOrCreate()
__A : List[Any] = df
__A : Optional[int] = working_dir
super().__init__(
cache_dir=_A , config_name=str(self.df.semanticHash() ) , **_A , )
def UpperCAmelCase_ ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(_A ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=_A )
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(_A , 'a' )
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
__A : List[str] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(_A ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def UpperCAmelCase_ ( self ):
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase_ ( self , _A ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def UpperCAmelCase_ ( self , _A ):
import pyspark
def get_arrow_batch_size(_A ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
__A : Any = self.df.count()
__A : List[str] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
__A : str = (
self.df.limit(_A )
.repartition(1 )
.mapInArrow(_A , 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
__A : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
__A : Optional[Any] = min(_A , int(approx_total_size / max_shard_size ) )
__A : str = self.df.repartition(_A )
def UpperCAmelCase_ ( self , _A , _A , _A , ):
import pyspark
__A : Tuple = ParquetWriter if file_format == 'parquet' else ArrowWriter
__A : Optional[Any] = os.path.join(self._working_dir , os.path.basename(_A ) ) if self._working_dir else fpath
__A : Dict = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
__A : int = self.config.features
__A : List[Any] = self._writer_batch_size
__A : int = self._fs.storage_options
def write_arrow(_A ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
__A : List[Any] = pyspark.TaskContext().taskAttemptId()
__A : List[str] = next(_A , _A )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
__A : Dict = 0
__A : List[str] = writer_class(
features=_A , path=working_fpath.replace('SSSSS' , F"""{shard_id:05d}""" ).replace('TTTTT' , F"""{task_id:05d}""" ) , writer_batch_size=_A , storage_options=_A , embed_local_files=_A , )
__A : Tuple = pa.Table.from_batches([first_batch] )
writer.write_table(_A )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
__A , __A : Optional[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
__A : List[Any] = writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , F"""{shard_id:05d}""" ).replace('TTTTT' , F"""{task_id:05d}""" ) , writer_batch_size=_A , storage_options=_A , embed_local_files=_A , )
__A : List[Any] = pa.Table.from_batches([batch] )
writer.write_table(_A )
if writer._num_bytes > 0:
__A , __A : Union[str, Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(_A ) ):
__A : List[Any] = os.path.join(os.path.dirname(_A ) , os.path.basename(_A ) )
shutil.move(_A , _A )
__A : List[str] = (
self.df.mapInArrow(_A , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def UpperCAmelCase_ ( self , _A , _A = "arrow" , _A = None , _A = None , **_A , ):
self._validate_cache_dir()
__A : Dict = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(_A )
__A : List[Any] = not is_remote_filesystem(self._fs )
__A : Union[str, Any] = os.path.join if is_local else posixpath.join
__A : List[str] = '-TTTTT-SSSSS-of-NNNNN'
__A : Any = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
__A : str = path_join(self._output_dir , _A )
__A : Any = 0
__A : List[str] = 0
__A : Optional[Any] = 0
__A : Tuple = []
__A : Optional[Any] = []
for task_id, content in self._prepare_split_single(_A , _A , _A ):
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) : List[str] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(_A )
__A : Optional[int] = total_num_examples
__A : Tuple = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
__A : Any = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
__A : Union[str, Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
_A , _A , _A , ):
rename(
_A , fpath.replace('SSSSS' , F"""{shard_id:05d}""" ).replace('TTTTT' , F"""{task_id:05d}""" ) , fpath.replace('TTTTT-SSSSS' , F"""{global_shard_id:05d}""" ).replace('NNNNN' , F"""{total_shards:05d}""" ) , )
__A : List[Any] = []
__A : List[Any] = 0
for i in range(len(_A ) ):
__A , __A : Tuple = task_id_and_num_shards[i]
for shard_id in range(_A ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(_A , len(_A ) ).map(lambda _A : _rename_shard(*_A ) ).collect()
else:
# don't use any pattern
__A : Union[str, Any] = 0
__A : Dict = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , F"""{shard_id:05d}""" ).replace('TTTTT' , F"""{task_id:05d}""" ) , fpath.replace(_A , '' ) , )
def UpperCAmelCase_ ( self , _A , ):
return SparkExamplesIterable(self.df )
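The shard-naming scheme above is easiest to see on a toy path: TTTTT/SSSSS are per-task placeholders that are later rewritten to the global -SSSSS-of-NNNNN form. A small sketch with made-up ids:

fpath = "my_dataset-train-TTTTT-SSSSS-of-NNNNN.arrow"
task_path = fpath.replace("SSSSS", f"{7:05d}").replace("TTTTT", f"{3:05d}")
print(task_path)  # my_dataset-train-00003-00007-of-NNNNN.arrow
final_path = fpath.replace("TTTTT-SSSSS", f"{42:05d}").replace("NNNNN", f"{128:05d}")
print(final_path)  # my_dataset-train-00042-of-00128.arrow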
| 280
|
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
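Two quick sanity checks of get_distance (after the fixes above): a point inside the set never diverges, while a point far outside diverges on the first step.

print(get_distance(0, 0, 50))  # 1.0 -> never diverges, colored black
print(get_distance(2, 2, 50))  # 0.0 -> |a + bi| exceeds 4 immediately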
| 280
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_A = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class lowercase_ :
A__ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
A__ : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
A__ : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
A__ : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class lowercase_ :
A__ : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
A__ : str = field(metadata={"""help""": """Should contain the data files for the task."""} )
A__ : int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
A__ : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def lowerCamelCase__ ( ) -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , a__ )
# Set seed
set_seed(training_args.seed )
try:
UpperCamelCase_ = processors[data_args.task_name]()
UpperCamelCase_ = processor.get_labels()
UpperCamelCase_ = len(a__ )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=a__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
UpperCamelCase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=a__ , cache_dir=model_args.cache_dir , )
# Get datasets
UpperCamelCase_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=a__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
UpperCamelCase_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=a__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(a__ : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
    UpperCamelCase_ = DataCollatorWithPadding(a__ , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
UpperCamelCase_ = Trainer(
model=a__ , args=a__ , train_dataset=a__ , eval_dataset=a__ , compute_metrics=a__ , data_collator=a__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCamelCase_ = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_ = trainer.evaluate()
UpperCamelCase_ = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(a__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , a__ , a__ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(a__ )
return results
def lowerCamelCase__ ( a__ : int ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
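A toy check of the accuracy metric, assuming transformers' EvalPrediction container (logits for 3 examples with 4 choices each; values are illustrative):

import numpy as np
from transformers import EvalPrediction

p = EvalPrediction(
    predictions=np.array([[0.1, 0.9, 0.0, 0.0], [0.8, 0.1, 0.0, 0.1], [0.0, 0.0, 0.2, 0.7]]),
    label_ids=np.array([1, 0, 2]),
)
preds = np.argmax(p.predictions, axis=1)    # [1, 0, 3]
print(simple_accuracy(preds, p.label_ids))  # 2 of 3 correct -> 0.666...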
| 261
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
A__ : List[str] = """align_text_model"""
def __init__( self , __UpperCamelCase=3_0_5_2_2 , __UpperCamelCase=7_6_8 , __UpperCamelCase=1_2 , __UpperCamelCase=1_2 , __UpperCamelCase=3_0_7_2 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_1_2 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=1e-12 , __UpperCamelCase=0 , __UpperCamelCase="absolute" , __UpperCamelCase=True , **__UpperCamelCase , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase )
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = hidden_act
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = position_embedding_type
UpperCamelCase_ = use_cache
UpperCamelCase_ = pad_token_id
@classmethod
def lowerCamelCase_ ( cls , __UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(__UpperCamelCase )
UpperCamelCase_ , UpperCamelCase_ = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
UpperCamelCase_ = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
A__ : Optional[int] = """align_vision_model"""
def __init__( self , __UpperCamelCase = 3 , __UpperCamelCase = 6_0_0 , __UpperCamelCase = 2.0 , __UpperCamelCase = 3.1 , __UpperCamelCase = 8 , __UpperCamelCase = [3, 3, 5, 3, 5, 5, 3] , __UpperCamelCase = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , __UpperCamelCase = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , __UpperCamelCase = [] , __UpperCamelCase = [1, 2, 2, 2, 1, 2, 1] , __UpperCamelCase = [1, 2, 2, 3, 3, 4, 1] , __UpperCamelCase = [1, 6, 6, 6, 6, 6, 6] , __UpperCamelCase = 0.25 , __UpperCamelCase = "swish" , __UpperCamelCase = 2_5_6_0 , __UpperCamelCase = "mean" , __UpperCamelCase = 0.02 , __UpperCamelCase = 0.001 , __UpperCamelCase = 0.99 , __UpperCamelCase = 0.2 , **__UpperCamelCase , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase )
UpperCamelCase_ = num_channels
UpperCamelCase_ = image_size
UpperCamelCase_ = width_coefficient
UpperCamelCase_ = depth_coefficient
UpperCamelCase_ = depth_divisor
UpperCamelCase_ = kernel_sizes
UpperCamelCase_ = in_channels
UpperCamelCase_ = out_channels
UpperCamelCase_ = depthwise_padding
UpperCamelCase_ = strides
UpperCamelCase_ = num_block_repeats
UpperCamelCase_ = expand_ratios
UpperCamelCase_ = squeeze_expansion_ratio
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dim
UpperCamelCase_ = pooling_type
UpperCamelCase_ = initializer_range
UpperCamelCase_ = batch_norm_eps
UpperCamelCase_ = batch_norm_momentum
UpperCamelCase_ = drop_connect_rate
UpperCamelCase_ = sum(__UpperCamelCase ) * 4
@classmethod
def lowerCamelCase_ ( cls , __UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(__UpperCamelCase )
UpperCamelCase_ , UpperCamelCase_ = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
UpperCamelCase_ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
A__ : Tuple = """align"""
A__ : int = True
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=6_4_0 , __UpperCamelCase=1.0 , __UpperCamelCase=0.02 , **__UpperCamelCase , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase )
if text_config is None:
UpperCamelCase_ = {}
logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" )
if vision_config is None:
UpperCamelCase_ = {}
logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" )
UpperCamelCase_ = AlignTextConfig(**__UpperCamelCase )
UpperCamelCase_ = AlignVisionConfig(**__UpperCamelCase )
UpperCamelCase_ = projection_dim
UpperCamelCase_ = temperature_init_value
UpperCamelCase_ = initializer_range
@classmethod
def lowerCamelCase_ ( cls , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = copy.deepcopy(self.__dict__ )
UpperCamelCase_ = self.text_config.to_dict()
UpperCamelCase_ = self.vision_config.to_dict()
UpperCamelCase_ = self.__class__.model_type
return output
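Assuming these classes correspond to transformers' AlignTextConfig / AlignVisionConfig / AlignConfig (names in this row are obfuscated), composing the combined config looks like:

from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

text_config = AlignTextConfig()
vision_config = AlignVisionConfig()
config = AlignConfig.from_text_vision_configs(text_config, vision_config)
print(config.projection_dim)  # 640 by default, per the __init__ above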
| 261
| 1
|