code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
def __a(separator, separated):
    """Join the strings in ``separated`` with ``separator`` between them.

    Validates every element before joining, unlike ``str.join``.

    Args:
        separator: string placed between (and stripped from the ends of)
            the joined result.
        separated: iterable of strings to join.

    Raises:
        Exception: if any element of ``separated`` is not a string.
    """
    # BUG FIX: both parameters were obfuscated to the same name (a
    # SyntaxError) while the body read `separator` / `separated`; the
    # intended names are restored.  The isinstance check compared a value
    # against itself -- `str` is the intended type.
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    # strip() removes any leftover separator characters from both ends.
    return joined.strip(separator)
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 61 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
    import torch

# Module-wide RNG used by the synthetic-input helper below.
# BUG FIX: this was bound to `__A` (with a wrong `str` annotation) while
# the helper reads `global_rng`, which raised NameError at runtime.
global_rng = random.Random()
def __SCREAMING_SNAKE_CASE(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2-D ``shape``.

    Args:
        shape: (rows, cols) pair giving the size of the output.
        scale: multiplier applied to every random value in [0, 1).
        rng: optional ``random.Random``; falls back to the module-level
            ``global_rng`` when omitted.
        name: unused; kept for signature compatibility with callers.

    Returns:
        A ``shape[0] x shape[1]`` list of lists of floats.
    """
    # BUG FIX: all four parameters were obfuscated to one duplicate name
    # (a SyntaxError) while the body read `shape`/`scale`/`rng`; the
    # undefined `Tuple` return annotation is dropped as well.
    if rng is None:
        rng = global_rng  # module-level RNG defined near the imports
    values = []
    for _ in range(shape[0]):
        # One rng.random() call per cell, row-major, matching the
        # original append-per-column order.
        values.append([rng.random() * scale for _ in range(shape[1])])
    return values
@require_torch
class A_ (unittest.TestCase ):
    # Hyper-parameter / synthetic-input helper for the SpeechT5
    # feature-extractor tests below (upstream: SpeechTaFeatureExtractionTester).
    # NOTE(review): the original names were obfuscated -- every __init__
    # parameter shares the duplicate name `_A` (not valid Python) and the
    # body reads names such as `parent` and `batch_size` that no longer
    # exist; all three methods are also named `_lowercase`, so only the
    # last definition per signature survives on the class.  Restore the
    # upstream identifiers before running.
    def __init__( self , _A , _A=7 , _A=4_0_0 , _A=2_0_0_0 , _A=1 , _A=0.0 , _A=1_6_0_0_0 , _A=True , _A=8_0 , _A=1_6 , _A=6_4 , _A="hann_window" , _A=8_0 , _A=7_6_0_0 , _A=1E-10 , _A=True , ):
        '''Record every extractor hyper-parameter on the instance.'''
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = min_seq_length
        UpperCAmelCase = max_seq_length
        # Step between consecutive example lengths so a batch spans
        # [min_seq_length, max_seq_length].
        UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        UpperCAmelCase = feature_size
        UpperCAmelCase = padding_value
        UpperCAmelCase = sampling_rate
        UpperCAmelCase = do_normalize
        UpperCAmelCase = num_mel_bins
        UpperCAmelCase = hop_length
        UpperCAmelCase = win_length
        UpperCAmelCase = win_function
        UpperCAmelCase = fmin
        UpperCAmelCase = fmax
        UpperCAmelCase = mel_floor
        UpperCAmelCase = return_attention_mask

    def _lowercase ( self ):
        '''Return the extractor hyper-parameters as a kwargs dict.'''
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def _lowercase ( self , _A=False , _A=False ):
        '''Build a batch of synthetic 1-D waveforms (equal or increasing length).'''
        def _flatten(_A ):
            return list(itertools.chain(*_A ) )

        if equal_length:
            UpperCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            UpperCAmelCase = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            UpperCAmelCase = [np.asarray(_A ) for x in speech_inputs]
        return speech_inputs

    def _lowercase ( self , _A=False , _A=False ):
        '''Build a batch of synthetic 2-D (frames x mel-bins) target features.'''
        if equal_length:
            UpperCAmelCase = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            UpperCAmelCase = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            UpperCAmelCase = [np.asarray(_A ) for x in speech_inputs]
        return speech_inputs
@require_torch
class A_ (a_ , unittest.TestCase ):
    # Test suite for SpeechTaFeatureExtractor: waveform inputs, spectrogram
    # targets, padding/truncation, attention masks, and two integration
    # checks against reference values.
    # NOTE(review): names were obfuscated -- every test method is called
    # `_lowercase` (so later defs shadow earlier ones), the mixin base `a_`
    # and helper `SpeechTaFeatureExtractionTester` are undefined in this
    # file, and many locals/arguments were collapsed to `_A`/`UpperCAmelCase`.
    # Restore the upstream identifiers before running.
    UpperCAmelCase__ = SpeechTaFeatureExtractor

    def _lowercase ( self ):
        '''Create the shared hyper-parameter/tester helper.'''
        UpperCAmelCase = SpeechTaFeatureExtractionTester(self )

    def _lowercase ( self , _A ):
        '''Assert features are ~zero-mean / unit-variance along axis 0.'''
        self.assertTrue(np.all(np.mean(_A , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(_A , axis=0 ) - 1 ) < 1E-3 ) )

    def _lowercase ( self ):
        '''Waveform path: list vs numpy input, batched vs unbatched, agree.'''
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
        # Test not batched input
        UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
        # Test batched
        UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
        UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )

    def _lowercase ( self ):
        '''Zero-mean/unit-variance normalisation across padding strategies (np).'''
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
        UpperCAmelCase = [None, 1_6_0_0, None]
        for max_length, padding in zip(_A , _A ):
            UpperCAmelCase = feat_extract(_A , padding=_A , max_length=_A , return_tensors='''np''' )
            UpperCAmelCase = processed.input_values
            # padded tail must be (near) zero; leading part normalised
            self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
            self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )

    def _lowercase ( self ):
        '''Same normalisation checks without requesting return tensors.'''
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in lengths]
        UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
        UpperCAmelCase = [None, 1_6_0_0, None]
        for max_length, padding in zip(_A , _A ):
            UpperCAmelCase = feat_extract(_A , max_length=_A , padding=_A )
            UpperCAmelCase = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
            self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )

    def _lowercase ( self ):
        '''Truncation with padding="max_length".'''
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = feat_extract(
            _A , truncation=_A , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
        UpperCAmelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )

    def _lowercase ( self ):
        '''Truncation with padding="longest", including output-shape checks.'''
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = feat_extract(
            _A , truncation=_A , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
        UpperCAmelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1_0_0_0) )

        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = feat_extract(
            _A , truncation=_A , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
        UpperCAmelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1_2_0_0) )

    def _lowercase ( self ):
        '''pad() must keep float32 dtype for both numpy and torch outputs.'''
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
        UpperCAmelCase = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    def _lowercase ( self ):
        '''Spectrogram-target path (audio_target=...): shapes and np/list parity.'''
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
        # Test feature size
        UpperCAmelCase = feature_extractor(audio_target=_A , padding=_A , return_tensors='''np''' ).input_values
        self.assertTrue(input_values.ndim == 3 )
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
        # Test not batched input
        UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
        UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
        # Test batched
        UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
        UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        UpperCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        UpperCAmelCase = np.asarray(_A )
        UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
        UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )

    def _lowercase ( self ):
        '''BatchFeature round-trip for target features (numpy backend).'''
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCAmelCase = feat_extract.model_input_names[0]
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(_A ) == len(_A ) for x, y in zip(_A , processed_features[input_name] ) ) )

        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
        UpperCAmelCase = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            UpperCAmelCase = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )

    @require_torch
    def _lowercase ( self ):
        '''BatchFeature round-trip for target features (torch backend).'''
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCAmelCase = feat_extract.model_input_names[0]
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
        UpperCAmelCase = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            UpperCAmelCase = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )

    @require_torch
    def _lowercase ( self ):
        '''np and pt padding of the same batch must agree numerically.'''
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase = feat_extract.model_input_names[0]
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        UpperCAmelCase = feat_extract.num_mel_bins # hack!
        UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )[input_name]
        UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''pt''' )[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )

    def _lowercase ( self ):
        '''attention_mask is returned and matches the raw input lengths.'''
        UpperCAmelCase = self.feat_extract_dict
        UpperCAmelCase = True
        UpperCAmelCase = self.feature_extraction_class(**_A )
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase = [len(_A ) for x in speech_inputs]
        UpperCAmelCase = feat_extract.model_input_names[0]
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        UpperCAmelCase = feat_extract.num_mel_bins # hack!
        UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , _A )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _A )

    def _lowercase ( self ):
        '''attention_mask under max_length truncation.'''
        UpperCAmelCase = self.feat_extract_dict
        UpperCAmelCase = True
        UpperCAmelCase = self.feature_extraction_class(**_A )
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase = [len(_A ) for x in speech_inputs]
        UpperCAmelCase = feat_extract.model_input_names[0]
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        UpperCAmelCase = min(_A )
        UpperCAmelCase = feat_extract.num_mel_bins # hack!
        UpperCAmelCase = feat_extract.pad(
            _A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , _A )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )

    def _lowercase ( self , _A ):
        '''Load the requested number of dummy LibriSpeech waveforms.'''
        from datasets import load_dataset

        UpperCAmelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        UpperCAmelCase = ds.sort('''id''' ).select(range(_A ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]

    def _lowercase ( self ):
        '''Integration: extracted waveform slice matches reference values.'''
        # fmt: off
        UpperCAmelCase = torch.tensor(
            [2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
            3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
            2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
            4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
            7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
            4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
        # fmt: on
        UpperCAmelCase = self._load_datasamples(1 )
        UpperCAmelCase = SpeechTaFeatureExtractor()
        UpperCAmelCase = feature_extractor(_A , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
        self.assertTrue(torch.allclose(input_values[0, :3_0] , _A , atol=1E-6 ) )

    def _lowercase ( self ):
        '''Integration: extracted target spectrogram matches reference values.'''
        # fmt: off
        UpperCAmelCase = torch.tensor(
            [-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
            -3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
            -3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
            -3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
        # fmt: on
        UpperCAmelCase = self._load_datasamples(1 )
        UpperCAmelCase = SpeechTaFeatureExtractor()
        UpperCAmelCase = feature_extractor(audio_target=_A , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
        self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _A , atol=1E-4 ) )
| 273 | 0 |
import random
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =num - 1
__UpperCamelCase =0
while s % 2 == 0:
__UpperCamelCase =s // 2
t += 1
for _ in range(5 ):
__UpperCamelCase =random.randrange(2 , num - 1 )
__UpperCamelCase =pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if v != 1:
__UpperCamelCase =0
while v != (num - 1):
if i == t - 1:
return False
else:
__UpperCamelCase =i + 1
__UpperCamelCase =(v**2) % num
return True
def is_prime_low_num(num: int) -> bool:
    """Return True if ``num`` is (probably) prime.

    Small primes are answered from a lookup table, anything divisible by
    one of them is rejected immediately, and larger candidates are handed
    to the Miller-Rabin test.

    Args:
        num: integer to test; values below 2 are never prime.
    """
    # Renamed from the obfuscated `_UpperCAmelCase` (three sibling
    # functions shared that name) to match the call sites below.
    if num < 2:
        return False  # 0, 1 and negatives are not prime
    # All 168 primes below 1000.
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199,
        211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277,
        281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
        367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439,
        443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521,
        523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683,
        691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
        787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
        877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967,
        971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    # Cheap trial division by the small primes before the expensive
    # probabilistic test; this also guarantees rabin_miller only ever
    # sees odd candidates.
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random prime with roughly ``keysize`` bits.

    Draws uniform candidates in [2**(keysize-1), 2**keysize) until one
    passes is_prime_low_num.  Renamed from the obfuscated
    `_UpperCAmelCase` to match the `generate_large_prime()` call in the
    __main__ block below.
    """
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    # BUG FIX: the result was bound to `_A` while the prints read `num`
    # (NameError); the print arguments were also wrapped in an extra
    # tuple, which would print a tuple repr instead of the message.
    num = generate_large_prime()
    print("Prime number:", num)
    print("is_prime_low_num:", is_prime_low_num(num))
| 62 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure for the Speech2Text package: submodule name ->
# list of public names it defines.
# BUG FIX: this dict was bound to `__A` while `_LazyModule(...)` at the
# bottom of the file consumes `_import_structure`; the expected name is
# restored.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
# Register extra export lists depending on which optional backends
# (sentencepiece / torchaudio / TF / torch) are installed.
# NOTE(review): each branch assigns its list to `__A`, overwriting the
# previous one; in the upstream lazy-import layout these lines write into
# `_import_structure["<submodule>"]` instead -- confirm and restore.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : List[Any] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : List[Any] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : int = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Tuple = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
# Static type checkers see the real imports; at runtime the module is
# meant to be replaced by a lazily-populated _LazyModule.
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )
else:
    import sys

    # NOTE(review): upstream assigns the lazy module into
    # sys.modules[__name__]; here it is bound to `__A`, which leaves the
    # module itself unreplaced -- confirm against the original layout.
    # The imported names also mix "Speech2Text" (strings above) and
    # "SpeechaText" spellings; these look like obfuscation artifacts.
    __A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 | 0 |
'''simple docstring'''
import os
def _lowerCamelCase(grid=None):
    """Project Euler 11: greatest product of four adjacent grid numbers.

    Scans a 20x20 integer grid in four directions (right, down, and both
    diagonals) and returns the maximum product of four numbers in a line.

    Args:
        grid: optional 20x20 list of int rows.  When omitted the grid is
            read from ``grid.txt`` next to this file (the original
            behavior).

    Returns:
        The maximum four-in-a-line product found.
    """
    if grid is None:
        # BUG FIX: the original resolved the path from the undefined name
        # `lowercase` (upstream: __file__) and converted `int(lowercase)`
        # instead of each whitespace-separated token.
        with open(os.path.dirname(__file__) + "/grid.txt") as f:
            grid = [[int(x) for x in f.readline().split()] for _ in range(20)]
    l = grid  # noqa: E741  -- keep the short alias used by the scans
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal down-right
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal down-left (start at column 3 so j-3 stays in range)
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
    # BUG FIX: the original called the undefined name `solution()`; the
    # grid-search routine above is defined as `_lowerCamelCase`.
    print(_lowerCamelCase())
| 63 |
def __SCREAMING_SNAKE_CASE(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers h(n) = n * (2n - 1).

    Args:
        length: how many hexagonal numbers to produce; must be a positive
            int.

    Raises:
        ValueError: if ``length`` is not a positive integer.
    """
    # BUG FIX: the parameter was obfuscated while the body read `length`
    # (NameError), and the isinstance check compared the value against
    # itself.  The type check also runs first: comparing a non-int with
    # `<=` would raise TypeError before the intended ValueError.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
    # BUG FIX: the original called the undefined name `hexagonal_numbers`;
    # the generator above is `__SCREAMING_SNAKE_CASE`.  Positional
    # arguments keep this working regardless of the parameter's name.
    print(__SCREAMING_SNAKE_CASE(5))
    print(__SCREAMING_SNAKE_CASE(10))
| 273 | 0 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors wired in parallel.

    1 / R_total = sum(1 / R_i).

    Args:
        resistors: resistance values in ohms; all must be positive.

    Raises:
        ValueError: if any resistor is zero or negative (division by zero
            / unphysical).
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            # BUG FIX: the original raised ValueError(<resistors list>)
            # instead of the message it had just built.
            raise ValueError(msg)
        # BUG FIX: the original divided by the whole input list
        # (TypeError); each individual resistor is intended.
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def UpperCAmelCase__(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors wired in series.

    R_total = sum(R_i).

    Args:
        resistors: resistance values in ohms; none may be negative.

    Raises:
        ValueError: if any resistor has a negative value.
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        # The original adds before validating; the order is preserved
        # (a negative value raises before sum_r is ever returned).
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            # BUG FIX: the original raised ValueError(<resistors list>)
            # instead of the message it had just built.
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
    # Exercise any doctests in this module when run directly.
    from doctest import testmod

    testmod()
| 64 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
# Single-node SageMaker training smoke test, parameterized over the
# PyTorch and TensorFlow example scripts.  Skipped unless the
# TEST_SAGEMAKER environment variable literal-evals to True.
# NOTE(review): names were obfuscated -- all methods are `_lowercase`
# (later defs shadow earlier ones) and several locals/arguments were
# collapsed to `_A`/`UpperCAmelCase`; restore upstream identifiers before
# running.
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.g4dn.xlarge''',
            '''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
        },
        {
            '''framework''': '''tensorflow''',
            '''script''': '''run_tf.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.g4dn.xlarge''',
            '''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
        },
    ] )
class A_ (unittest.TestCase ):
    def _lowercase ( self ):
        '''Copy the example training script into the test dir (pytorch only).'''
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
        assert hasattr(self , '''env''' )

    def _lowercase ( self , _A=1 ):
        '''Build a HuggingFace SageMaker estimator for the configured script.'''
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )

    def _lowercase ( self , _A ):
        '''Export the training job's CloudWatch metrics to a CSV file.'''
        TrainingJobAnalytics(_A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )

    def _lowercase ( self ):
        '''Train, assert runtime/accuracy/loss KPIs, and dump them to JSON.'''
        UpperCAmelCase = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        UpperCAmelCase = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 273 | 0 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# BUG FIX: the module logger was bound to `UpperCamelCase__` while the
# config class below calls `logger.info(...)` / `logger.warning(...)`
# (NameError); the conventional name is restored.  The checkpoint map
# keeps its original (obfuscated) binding so existing references hold.
logger = logging.get_logger(__name__)

# TODO Update this
UpperCamelCase__ = {
    'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class A ( UpperCAmelCase_ ):
    # ESM model configuration (upstream: EsmConfig).
    # NOTE(review): names are obfuscated -- the base class `UpperCAmelCase_`
    # (upstream: PretrainedConfig) is undefined here, and every __init__
    # parameter shares the name `__UpperCAmelCase` (a duplicate-argument
    # SyntaxError) while the body reads the original parameter names
    # (vocab_size, hidden_size, ...).  Restore before running.
    __UpperCAmelCase : int = 'esm'

    def __init__(self : List[str] , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : List[str]=7_6_8 , __UpperCAmelCase : List[str]=1_2 , __UpperCAmelCase : Any=1_2 , __UpperCAmelCase : List[str]=3_0_7_2 , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : int=1_0_2_6 , __UpperCAmelCase : Optional[int]=0.02 , __UpperCAmelCase : Optional[Any]=1E-12 , __UpperCAmelCase : int="absolute" , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : str=False , __UpperCAmelCase : Dict=False , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : Union[str, Any] , ) -> Any:
        """Store config values; build folding sub-config and vocab when requested."""
        super().__init__(pad_token_id=__UpperCAmelCase , mask_token_id=__UpperCAmelCase , **__UpperCAmelCase )
        UpperCAmelCase__ = vocab_size
        UpperCAmelCase__ = hidden_size
        UpperCAmelCase__ = num_hidden_layers
        UpperCAmelCase__ = num_attention_heads
        UpperCAmelCase__ = intermediate_size
        UpperCAmelCase__ = hidden_dropout_prob
        UpperCAmelCase__ = attention_probs_dropout_prob
        UpperCAmelCase__ = max_position_embeddings
        UpperCAmelCase__ = initializer_range
        UpperCAmelCase__ = layer_norm_eps
        UpperCAmelCase__ = position_embedding_type
        UpperCAmelCase__ = use_cache
        UpperCAmelCase__ = emb_layer_norm_before
        UpperCAmelCase__ = token_dropout
        UpperCAmelCase__ = is_folding_model
        if is_folding_model:
            # Folding models need an EsmFoldConfig and a vocab list; fall
            # back to defaults when either is missing.
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                UpperCAmelCase__ = EsmFoldConfig()
            elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
                UpperCAmelCase__ = EsmFoldConfig(**__UpperCAmelCase )
            UpperCAmelCase__ = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                UpperCAmelCase__ = get_default_vocab_list()
            else:
                UpperCAmelCase__ = vocab_list
        else:
            UpperCAmelCase__ = None
            UpperCAmelCase__ = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , __UpperCAmelCase ):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )

    def lowercase_ (self : Tuple ) -> Dict:
        """Serialize to a dict, expanding the folding sub-config."""
        UpperCAmelCase__ = super().to_dict()
        if isinstance(self.esmfold_config , __UpperCAmelCase ):
            UpperCAmelCase__ = self.esmfold_config.to_dict()
        return output
@dataclass
class A :
    # EsmFold sub-configuration (upstream: EsmFoldConfig).
    # NOTE(review): every field was obfuscated to the duplicate name
    # `__UpperCAmelCase`, so only one attribute effectively survives, and
    # the methods below read upstream field names (`trunk`) that no
    # longer exist; restore before running.  The string annotation
    # "TrunkConfig" refers to the trunk dataclass defined next (also
    # renamed to `A` by the obfuscation).
    __UpperCAmelCase : str = None
    __UpperCAmelCase : bool = True
    __UpperCAmelCase : bool = False
    __UpperCAmelCase : bool = False
    __UpperCAmelCase : bool = False
    __UpperCAmelCase : float = 0
    __UpperCAmelCase : bool = True
    __UpperCAmelCase : bool = False
    __UpperCAmelCase : int = 1_28
    __UpperCAmelCase : "TrunkConfig" = None

    def lowercase_ (self : Optional[Any] ) -> List[Any]:
        """Coerce `trunk` from None/dict into a TrunkConfig (post-init hook)."""
        if self.trunk is None:
            UpperCAmelCase__ = TrunkConfig()
        elif isinstance(self.trunk , __UpperCAmelCase ):
            UpperCAmelCase__ = TrunkConfig(**self.trunk )

    def lowercase_ (self : str ) -> Tuple:
        """Serialize to a dict, expanding the trunk sub-config."""
        UpperCAmelCase__ = asdict(self )
        UpperCAmelCase__ = self.trunk.to_dict()
        return output
@dataclass
class A :
    # Trunk sub-configuration with dimension validation (upstream:
    # TrunkConfig).  NOTE(review): fields were obfuscated to the duplicate
    # name `__UpperCAmelCase`, and the validator below is named `lowercase_`
    # rather than `__post_init__`, so it never runs automatically.
    __UpperCAmelCase : int = 48
    __UpperCAmelCase : int = 10_24
    __UpperCAmelCase : int = 1_28
    __UpperCAmelCase : int = 32
    __UpperCAmelCase : int = 32
    __UpperCAmelCase : int = 32
    __UpperCAmelCase : float = 0
    __UpperCAmelCase : float = 0
    __UpperCAmelCase : bool = False
    __UpperCAmelCase : int = 4
    __UpperCAmelCase : Optional[int] = 1_28
    __UpperCAmelCase : "StructureModuleConfig" = None

    def lowercase_ (self : Optional[int] ) -> Dict:
        """Validate dims and derive per-head widths (upstream __post_init__)."""
        if self.structure_module is None:
            UpperCAmelCase__ = StructureModuleConfig()
        elif isinstance(self.structure_module , __UpperCAmelCase ):
            UpperCAmelCase__ = StructureModuleConfig(**self.structure_module )

        if self.max_recycles <= 0:
            raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        # NOTE(review): the next two checks take a value modulo itself,
        # which is always 0 -- presumably the corresponding *_head_width
        # divisors were intended (see the derivations below); confirm
        # before relying on this validation.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" )

        # Head counts implied by the state dims and per-head widths.
        UpperCAmelCase__ = self.sequence_state_dim // self.sequence_head_width
        UpperCAmelCase__ = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )

        if self.dropout >= 0.4:
            raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )

    def lowercase_ (self : Any ) -> Optional[int]:
        """Serialize to a dict, expanding the structure-module sub-config."""
        UpperCAmelCase__ = asdict(self )
        UpperCAmelCase__ = self.structure_module.to_dict()
        return output
@dataclass
class A :
    # Structure-module sub-configuration (upstream: StructureModuleConfig).
    # NOTE(review): every field was obfuscated to the duplicate name
    # `__UpperCAmelCase`; restore the upstream field names before running.
    __UpperCAmelCase : int = 3_84
    __UpperCAmelCase : int = 1_28
    __UpperCAmelCase : int = 16
    __UpperCAmelCase : int = 1_28
    __UpperCAmelCase : int = 12
    __UpperCAmelCase : int = 4
    __UpperCAmelCase : int = 8
    __UpperCAmelCase : float = 0.1
    __UpperCAmelCase : int = 8
    __UpperCAmelCase : int = 1
    __UpperCAmelCase : int = 2
    __UpperCAmelCase : int = 7
    __UpperCAmelCase : int = 10
    __UpperCAmelCase : float = 1E-8
    __UpperCAmelCase : float = 1E5

    def lowercase_ (self : Optional[Any] ) -> str:
        """Serialize the dataclass to a plain dict."""
        return asdict(self )
def lowerCAmelCase_ ( ) -> Tuple:
    """Return the fixed ESM-style token vocabulary as a tuple of strings.

    Layout: four special tokens, the 25 residue letters, gap/insert symbols,
    and the trailing null/mask tokens.
    """
    special_tokens = ("<cls>", "<pad>", "<eos>", "<unk>")
    residue_tokens = tuple("LAGVSERTIDPKQNFYMHWCXBUZO")
    trailing_tokens = (".", "-", "<null_1>", "<mask>")
    return special_tokens + residue_tokens + trailing_tokens
| 65 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
# NOTE(review): both constants below share the name `__A`, so the logger is
# immediately shadowed by the URL map — confirm the intended names.
__A : int = logging.get_logger(__name__)
# Canonical checkpoint -> config-file URL map for the BigBird family.
__A : Tuple = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ (a_ ):
    """Configuration class for BigBird (block-sparse attention transformer).

    NOTE(review): the original signature used the duplicated placeholder `_A`
    for every parameter (a SyntaxError) and assigned each value to a local
    instead of an attribute; parameter names/defaults are restored from the
    upstream BigBird configuration.
    """

    # NOTE(review): upstream names this class attribute `model_type` — confirm.
    UpperCAmelCase__ = '''big_bird'''

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store the model hyper-parameters; special-token ids go to the base class."""
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        # "block_sparse" or "original_full" attention implementation selector
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class A_ (a_ ):
    """ONNX export configuration: declares the model's dynamic input axes."""

    @property
    def _lowercase ( self ):
        """Return an ordered input-name -> dynamic-axes mapping (task-dependent)."""
        if self.task == "multiple-choice":
            axis_names = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            axis_names = {0: "batch", 1: "sequence"}
        return OrderedDict(
            (input_name, axis_names) for input_name in ("input_ids", "attention_mask")
        )
| 273 | 0 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
__a = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
__a = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
__a = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
    """Pearson correlation metric, wrapping `scipy.stats.pearsonr`.

    NOTE(review): the converted file named both methods identically (the
    second shadowed the first) and dropped local variable names; the
    `_info`/`_compute` names expected by `datasets.Metric` are restored.
    """

    def _info(self):
        """Describe the metric: input features, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""float"""),
                    """references""": datasets.Value("""float"""),
                } ),
            reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Return {"pearsonr": r} (plus "p-value" when requested).

        `pearsonr` is symmetric in its two arguments, so argument order does
        not affect the coefficient.
        """
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 66 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
    """Builds tiny ViT configs/inputs and checks TF model outputs.

    NOTE(review): the converted signature used the duplicated placeholder `_A`
    for every parameter and named every method `_lowercase`; parameter and
    method names are restored to match the in-file call sites
    (`prepare_config_and_inputs`, `get_config`, etc.) — confirm values such as
    `training=False` against the upstream test.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build the small ViTConfig used by all checks."""
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check hidden-state shapes, including position-embedding interpolation."""
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check classification-logit shapes, incl. resized and greyscale images."""
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class A_ (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common-API test suite for the TF ViT models.

    NOTE(review): the converted file declared every class attribute under one
    mangled name and every test method as `_lowercase` (so only the last of
    each survived) and duplicated the base class; the attribute/method names
    consumed by the mixins and by unittest discovery are restored.
    """

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # NOTE(review): the model-tester class above was renamed by the
        # conversion; this reference must match its actual name.
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        """Every model exposes a layer for input embeddings (output may be None)."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        """`call` takes `pixel_values` as its first argument."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained('''google/vit-base-patch16-224''')
        self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test.

    (Renamed from the converted placeholder to match the `prepare_img()` call
    site below; the original also returned an undefined name.)
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_tf
@require_vision
class A_ (unittest.TestCase ):
    """Slow integration test pinning TF ViT classification logits.

    NOTE(review): local/variable names and the `default_image_processor`
    property name (used by the test body) are restored from the converted
    placeholders.
    """

    @cached_property
    def default_image_processor(self):
        """The image processor matching the pretrained checkpoint (or None without vision)."""
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''tf''')
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1E-4)
| 273 | 0 |
'''simple docstring'''
import requests
def send_slack_message(webhook_url: str, message_body: str) -> None:
    """POST `message_body` as JSON to a Slack incoming-webhook URL.

    Raises:
        ValueError: if Slack answers with a non-200 status code.

    (Renamed from the converted placeholder so the `send_slack_message(...)`
    call in the `__main__` guard resolves; the duplicated parameter names and
    collapsed locals are also restored.)
    """
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(webhook_url, json={'''text''': message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            '''Request to slack returned an error '''
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(error_message)
if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    # NOTE(review): the two placeholder arguments appear swapped relative to a
    # (webhook_url, message_body) signature — confirm the intended order, and
    # replace both placeholders before running.
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 67 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class A_ (unittest.TestCase ):
    """Slow integration tests pinning XLM-RoBERTa hidden-state values.

    NOTE(review): the converted file named both tests identically (so the
    first never ran) and referenced undefined locals; distinct `test_*` names
    and the variable flow are restored.
    """

    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-base''')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['''last_hidden_state'''].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-large''')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['''last_hidden_state'''].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 273 | 0 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Return the `num_runs` most recent scheduled runs of the daily-CI workflow on `main`.

    (Renamed from the duplicated converted placeholder so the call below
    resolves; all four helpers in this file previously shared one name.)
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'Bearer {token}'}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = F'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Return the run id of the most recent *completed* daily-CI run, or None.

    (Renamed from the duplicated converted placeholder to match the call in
    `get_last_daily_ci_artifacts`.)
    """
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily-CI run into `output_dir`.

    (Renamed from the duplicated converted placeholder to match the call in
    `get_last_daily_ci_reports`; parameter/local names restored.)
    """
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # (sic: keyword `worflow_run_id` matches get_artifacts_links' signature)
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Fetch the last daily-CI artifacts and return their text contents.

    Returns:
        dict: {artifact_name: {filename inside the zip: decoded file text}}.

    (The converted version collapsed every variable into one name, so it
    opened the wrong object at each step; names are restored.)
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, F'{artifact_name}.zip')
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
| 68 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
# Module-level logger for this training script.
__A : Optional[int] = logging.getLogger(__name__)
@dataclass
class A_ :
    """Arguments pertaining to which model/config/tokenizer we fine-tune.

    NOTE(review): the converted file declared every field under one mangled
    name with an undefined default; field names and defaults are restored
    from the help metadata — confirm against the attribute reads in `main`.
    """

    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    use_auth_token: bool = field(
        default=False, metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
@dataclass
class A_ :
    """Arguments pertaining to the data we train and evaluate on.

    NOTE(review): the converted file declared every field under one mangled
    name and named the validation hook `_lowercase` (so it never ran); field
    names are restored from the help metadata and the hook is restored to
    `__post_init__` so dataclass construction validates the file extensions.
    """

    train_file: Optional[str] = field(default=None, metadata={'''help''': '''The input training data file (a text file).'''} )
    validation_file: Optional[str] = field(
        default=None, metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    overwrite_cache: bool = field(
        default=False, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. If passed, sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    pad_to_max_length: bool = field(
        default=False, metadata={
            '''help''': (
                '''Whether to pad all samples to the maximum sentence length. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
                '''efficient on GPU but very bad for TPU.'''
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )

    def __post_init__(self):
        """Validate that the provided data files are csv or json."""
        if self.train_file is not None:
            extension = self.train_file.split('''.''')[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('''.''')[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class A_ :
    """Data collator that dynamically pads multiple-choice inputs.

    Flattens the per-choice features, pads them in a single tokenizer call,
    then un-flattens back to (batch_size, num_choices, seq_len) and
    re-attaches the labels.

    NOTE(review): the converted file collapsed the four fields into one
    mangled name (the tokenizer annotation became a literal `42`) and dropped
    the `batch["labels"]` assignment; names/defaults are restored from the
    upstream multiple-choice collator.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]['''input_ids'''])
        # One flat list of per-choice examples so the tokenizer pads them together.
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors='''pt''' ,
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels (the mangled `torch.intaa` was int64)
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase__ )
datasets.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCAmelCase = {}
if data_args.train_file is not None:
UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
UpperCAmelCase = data_args.validation_file
UpperCAmelCase = data_args.train_file.split('''.''' )[-1]
UpperCAmelCase = load_dataset(
UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCAmelCase = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCAmelCase = [F"""ending{i}""" for i in range(4 )]
UpperCAmelCase = '''sent1'''
UpperCAmelCase = '''sent2'''
if data_args.max_seq_length is None:
UpperCAmelCase = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
UpperCAmelCase = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(UpperCamelCase__ ):
UpperCAmelCase = [[context] * 4 for context in examples[context_name]]
UpperCAmelCase = examples[question_header_name]
UpperCAmelCase = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(UpperCamelCase__ )
]
# Flatten out
UpperCAmelCase = list(chain(*UpperCamelCase__ ) )
UpperCAmelCase = list(chain(*UpperCamelCase__ ) )
# Tokenize
UpperCAmelCase = tokenizer(
UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
UpperCAmelCase = raw_datasets['''train''']
if data_args.max_train_samples is not None:
UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
UpperCAmelCase = train_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
UpperCAmelCase = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
UpperCAmelCase = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
UpperCAmelCase = eval_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
UpperCAmelCase = eval_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCAmelCase = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(UpperCamelCase__ ):
UpperCAmelCase , UpperCAmelCase = eval_predictions
UpperCAmelCase = np.argmax(UpperCamelCase__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCAmelCase = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
UpperCAmelCase = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCAmelCase = train_result.metrics
UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
)
UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics('''train''' , UpperCamelCase__ )
trainer.save_metrics('''train''' , UpperCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase = trainer.evaluate()
UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics('''eval''' , UpperCamelCase__ )
trainer.save_metrics('''eval''' , UpperCamelCase__ )
UpperCAmelCase = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase__ )
else:
trainer.create_model_card(**UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int:
    """Multiprocessing entry point (presumably for xla_spawn; the index argument is unused — TODO confirm)."""
    main()
if __name__ == "__main__":
    # Standard script entry point.
    main()
| 273 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class UpperCamelCase:
    """Aho-Corasick automaton for simultaneous multi-keyword string search.

    The automaton is a trie stored as a flat list of node dicts (``adlist``);
    each node carries its character ``value``, child indices ``next_states``,
    a BFS-computed ``fail_state`` link, and the ``output`` keywords ending there.

    Fixes vs. the previous revision: ``self.adlist`` was never initialized
    (assigned to a throwaway local), all four methods shared one name while the
    code called ``add_keyword``/``set_fail_transitions``/``find_next_state``,
    and ``search_in`` used ``len(string)`` instead of ``len(key)`` when
    computing match start positions.
    """

    def __init__(self, keywords) -> None:
        """Build the trie from *keywords* and wire up the failure links."""
        # Node 0 is the root; it matches the empty prefix.
        self.adlist = [{"value": "", "next_states": [], "fail_state": 0, "output": []}]
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state, char):
        """Return the child of *current_state* labeled *char*, or None."""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword) -> None:
        """Insert *keyword* into the trie, creating nodes as needed."""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        # The final node of the keyword emits the keyword itself.
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        """BFS over the trie to compute failure links and merge output sets."""
        q = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            # Depth-1 nodes always fail back to the root.
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                # Walk fail links until a state with a matching child (or root).
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                # A match at this node also implies every match of its fail state.
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string) -> dict:
        """Return {keyword: [start indices]} for every occurrence in *string*."""
        result = {}  # keyword -> list of 0-based start positions
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                # Match ends at i; it started len(key)-1 characters earlier.
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 69 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class A_ :
    # Model tester that builds a tiny MBart config plus matching inputs for the
    # TF test class below.
    # NOTE(review): this class is badly mangled — `__init__` repeats the
    # parameter name `_A` (a SyntaxError), every local is assigned to the same
    # placeholder `UpperCAmelCase`, and the test class at the bottom of this
    # file instantiates `TFMBartModelTester`, not `A_`. The original
    # parameter/local names need to be restored before this can run.
    UpperCAmelCase__ = MBartConfig
    UpperCAmelCase__ = {}
    UpperCAmelCase__ = '''gelu'''
    def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=False , _A=9_9 , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A=0.1 , _A=0.1 , _A=2_0 , _A=2 , _A=1 , _A=0 , ):
        """Store the parent test case and the tiny model hyper-parameters."""
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = seq_length
        UpperCAmelCase = is_training
        UpperCAmelCase = use_labels
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = eos_token_id
        UpperCAmelCase = pad_token_id
        UpperCAmelCase = bos_token_id
    def _lowercase ( self ):
        """Build a tiny MBart config and the matching input dict (inputs end with EOS)."""
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        UpperCAmelCase = prepare_mbart_inputs_dict(_A , _A , _A )
        return config, inputs_dict
    def _lowercase ( self , _A , _A ):
        """Exercise the decoder with cached past key/values on a 1-example batch."""
        UpperCAmelCase = TFMBartModel(config=_A ).get_decoder()
        UpperCAmelCase = inputs_dict['''input_ids''']
        # Restrict to a single example to keep the cache comparison cheap.
        UpperCAmelCase = input_ids[:1, :]
        UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
        UpperCAmelCase = inputs_dict['''head_mask''']
        UpperCAmelCase = 1
        # first forward pass
        UpperCAmelCase = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A )
        UpperCAmelCase , UpperCAmelCase = outputs.to_tuple()
        # NOTE(review): method appears truncated relative to the upstream test
        # (no assertions follow the cache extraction).
        UpperCAmelCase = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> dict:
    """Assemble the TFMBart test input dict, deriving any mask not supplied.

    Fixes vs. the previous revision: all parameters shared one name (a
    SyntaxError), the derived masks were assigned to a dead placeholder so the
    returned dict held the original ``None``s, the function name did not match
    its call site (``prepare_mbart_inputs_dict``), and ``tf.inta`` is not a
    TensorFlow dtype (restored to ``tf.int8``).
    """
    if attention_mask is None:
        # Attend to every non-pad encoder token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # First decoder position is always attended; the rest mask out pads.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class A_ (a_ , a_ , unittest.TestCase ):
    # Common TF MBart model tests driven by the tester class above.
    # NOTE(review): the base classes `a_` are undefined (presumably the
    # ModelTesterMixin / PipelineTesterMixin imported at the top — confirm),
    # the first method repeats the parameter name `_A` (a SyntaxError), and
    # locals are assigned to a dead placeholder. Original names need restoring.
    UpperCAmelCase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    UpperCAmelCase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    UpperCAmelCase__ = (
        {
            '''conversational''': TFMBartForConditionalGeneration,
            '''feature-extraction''': TFMBartModel,
            '''summarization''': TFMBartForConditionalGeneration,
            '''text2text-generation''': TFMBartForConditionalGeneration,
            '''translation''': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase__ = True
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    def _lowercase ( self , _A , _A , _A , _A , _A ):
        """Skip every pipeline test except feature extraction."""
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def _lowercase ( self ):
        """Create the model tester and config tester fixtures."""
        UpperCAmelCase = TFMBartModelTester(self )
        UpperCAmelCase = ConfigTester(self , config_class=_A )
    def _lowercase ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def _lowercase ( self ):
        """Check the decoder's past-key-values path on large inputs."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_A )
@require_sentencepiece
@require_tokenizers
@require_tf
class A_ (unittest.TestCase ):
    # Slow integration test: EN->RO translation with facebook/mbart-large-en-ro.
    # NOTE(review): all methods share the name `_lowercase`, so only the last
    # definition survives on the class; the cached properties and helpers
    # (tokenizer/model/translate_src_text/...) need their original names back.
    UpperCAmelCase__ = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
    ]
    UpperCAmelCase__ = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
    ]
    UpperCAmelCase__ = '''facebook/mbart-large-en-ro'''
    @cached_property
    def _lowercase ( self ):
        """Lazily load and cache the tokenizer for the checkpoint."""
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def _lowercase ( self ):
        """Lazily load and cache the seq2seq model for the checkpoint."""
        UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    def _lowercase ( self , **_A ):
        """Translate the source batch and compare against the expected text."""
        UpperCAmelCase = self.translate_src_text(**_A )
        self.assertListEqual(self.expected_text , _A )
    def _lowercase ( self , **_A ):
        """Tokenize the source text, generate with beam search, and decode."""
        UpperCAmelCase = self.tokenizer(self.src_text , **_A , return_tensors='''tf''' )
        UpperCAmelCase = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        UpperCAmelCase = self.tokenizer.batch_decode(_A , skip_special_tokens=_A )
        return generated_words
    @slow
    def _lowercase ( self ):
        """Slow end-to-end generation check."""
        self._assert_generated_batch_equal_expected()
| 273 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()

# Module-level logger used by the conversion helpers below.
# Fix vs. previous revision: these three module names were all collapsed to
# `A__`, while the functions below reference `logger`, `MAPPING` and
# `TOP_LEVEL_KEYS`.
logger = logging.get_logger(__name__)

# Maps fairseq parameter-name fragments to their transformers counterparts.
# A "*" in the mapped name is replaced with the encoder-layer index at load time.
MAPPING = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
    '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
    '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
    '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
    '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
    '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
    '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
    '''self_attn.rotary_emb''': '''encoder.embed_positions''',
    '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
    '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
    '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
    '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
    '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
    '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
    '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
    '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
    '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
    '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
    '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
    '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
}

# Converted keys that live at the top level of the HF model rather than under
# the wav2vec2-conformer submodule (so no prefix is added for them).
TOP_LEVEL_KEYS = [
    '''lm_head''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the parameter of `hf_pointer` addressed by dotted `key`.

    Fixes vs. the previous revision: all five parameters shared one name (a
    SyntaxError), the pointer never actually descended through the attributes,
    and the tensor assignments wrote to a dead local instead of the model.
    The name matches the call site in ``recursively_load_weights``.

    Args:
        hf_pointer: root HF module to descend into.
        key: dotted attribute path (e.g. ``encoder.layers.3.ffn1.output_dense``).
        value: source tensor to copy.
        full_name: original fairseq name, used for error/log messages only.
        weight_type: which leaf tensor to assign (``weight``/``bias``/... ) or
            None to assign ``.data`` on the resolved attribute itself.

    Raises:
        ValueError: if the target and source tensor shapes differ.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Walk the fairseq state dict and copy every matched tensor into `hf_model`.

    Conv feature-extractor tensors are routed through ``load_conv_layer``;
    everything else is matched against ``MAPPING`` and assigned via
    ``set_recursively``. Unmatched tensors are reported with a warning.

    Fixes vs. the previous revision: all three parameters shared one name (a
    SyntaxError) and every local was assigned to a single placeholder, so no
    weight was ever transferred. The function name matches the call site in
    the conversion entry point.

    NOTE(review): ``is_headless`` is accepted but unused in this visible body —
    confirm against the upstream conversion script.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wavaveca_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Non-top-level targets live under the conformer submodule.
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Extract the encoder layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv feature-extractor tensor from fairseq into the HF extractor.

    The fairseq name encodes the layer index and a type id:
    type 0 -> the conv itself; type 2 -> its layer norm (only for layer-norm
    extractors, or layer 0 when group norm is used). Anything else is recorded
    in ``unused_weights``.

    Fixes vs. the previous revision: duplicate parameter names (a SyntaxError)
    and assignments that went to a dead local instead of the extractor tensors.

    Raises:
        ValueError: if the source and target tensor shapes differ.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq wav2vec2-conformer checkpoint to the transformers format.

    Builds the config (optionally from ``config_path``), for fine-tuned CTC
    models also writes a vocab/tokenizer/processor derived from ``dict_path``,
    loads the fairseq model, transfers its weights, and saves everything to
    ``pytorch_dump_folder_path``.

    Fixes vs. the previous revision: duplicate parameter names (a SyntaxError)
    and every local collapsed into one placeholder, so the config was never
    updated and the loaded model was never used. The name matches the
    ``__main__`` call site.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        # NOTE(review): attribute name restored from the upstream conversion
        # script — confirm it matches this config class.
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.eos_token_id = target_dict.bos_index
            config.pad_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            # Layer-norm feature extractors need the attention mask at inference.
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse the conversion arguments and run the converter.
    # Fix vs. previous revision: the parser and parsed-args locals were both
    # named `A__`, so `parser.add_argument` / `args.*` raised NameError.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 70 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    """Builds tiny CTRL configs and inputs for the model tests below.

    Fixes vs. the previous revision: the class was named ``A_`` while the test
    class instantiates ``CTRLModelTester``; method parameters repeated ``_A``
    (a SyntaxError); and every attribute/local was assigned to a single
    placeholder, so no state was ever stored. Names restored from the return
    tuple and usage sites visible in this file.
    """

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """Store the parent test case and the tiny model hyper-parameters."""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # Last vocab id doubles as the pad token in these tests.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels and the matching config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        """Return a tiny CTRLConfig matching the tester's hyper-parameters."""
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Forward the base model with several input combinations and check shapes."""
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Forward the LM head model with labels and check loss/logits shapes."""
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapt the full input tuple to the dict form the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        """Forward the sequence-classification head and check the logits shape."""
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common CTRL model tests driven by ``CTRLModelTester``.

    Fixes vs. the previous revision: the base classes were the undefined name
    ``a_`` (NameError at import; restored to the mixins imported at the top of
    the file), the skip hook repeated the parameter ``_A`` (a SyntaxError),
    class attributes and fixture locals were collapsed into placeholders, and
    ``ConfigTester`` received the undefined ``_A`` instead of ``CTRLConfig``.
    """

    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Skip zero-shot pipeline tests, which cannot run for CTRL."""
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False

    def setUp(self):
        """Create the model tester and config tester fixtures."""
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_parallelism(self):
        # NOTE(review): method name restored from the upstream test file — confirm.
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip('''The model doesn\'t support left padding''')  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class A_ (unittest.TestCase):
    """Slow integration test: greedy generation with the pretrained ``ctrl`` checkpoint.

    Fixes vs. the previous revision: the method bodies referenced the undefined
    name ``_A`` where the device (``torch_device``) and ``do_sample=False`` are
    needed, and every local was assigned to a single placeholder, so the
    generated ids were never compared.
    """

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        """Greedy-generate from a fixed prompt and compare to the known output."""
        model = CTRLLMHeadModel.from_pretrained('''ctrl''')
        model.to(torch_device)
        input_ids = torch.tensor(
            [[1_1_8_5_9, 0, 1_6_1_1, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            1_1_8_5_9,
            0,
            1_6_1_1,
            8,
            5,
            1_5_0,
            2_6_4_4_9,
            2,
            1_9,
            3_4_8,
            4_6_9,
            3,
            2_5_9_5,
            4_8,
            2_0_7_4_0,
            2_4_6_5_3_3,
            2_4_6_5_3_3,
            1_9,
            3_0,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 273 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ :Any = logging.get_logger(__name__)
class __A ( a ):
    """DeiT-style image processor: optional resize, center-crop, rescale and
    normalize steps producing a batch of ``pixel_values``.

    Defaults: resize to 256x256 (bicubic), center-crop to 224x224, rescale by
    1/255 and normalize with the ImageNet standard mean/std.

    NOTE(review): parameter/method names restored from the obfuscated
    original, whose signatures reused a single parameter name (a SyntaxError)
    and whose methods all shared one name while the body called
    ``self.resize`` / ``self.center_crop`` / etc.
    """

    # Name of the tensor this processor produces for the model
    # (standard HF image-processor attribute).
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize = True,
        size = None,
        resample = PIL.Image.BICUBIC,
        do_center_crop = True,
        crop_size = None,
        rescale_factor = 1 / 255,
        do_rescale = True,
        do_normalize = True,
        image_mean = None,
        image_std = None,
        **kwargs,
    ):
        """Store the preprocessing defaults (names recovered from the
        attribute assignments in the body)."""
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PIL.Image.BICUBIC, data_format=None, **kwargs):
        """Resize ``image`` to the exact (height, width) given by ``size``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}')
        # `resize` below is the module-level transform, not this method.
        return resize(
            image, size=(size['height'], size['width']), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop ``image`` to the (height, width) given by ``size``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize ``image`` channel-wise with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize = None,
        size = None,
        resample = None,
        do_center_crop = None,
        crop_size = None,
        do_rescale = None,
        rescale_factor = None,
        do_normalize = None,
        image_mean = None,
        image_std = None,
        return_tensors = None,
        data_format = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured pipeline to one image or a list of images and
        return a ``BatchFeature`` holding ``pixel_values``.

        Any argument left as None falls back to the value stored on the
        processor at construction time.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        # FIX: parenthesized — the original `a and b or c` raised whenever
        # `resample` was None even with `do_resize` disabled.
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 71 |
import cva
import numpy as np
class A_ :
    """Harris corner detector over a grayscale image.

    ``k`` is the Harris free parameter (only the conventional 0.04 / 0.06
    values are accepted) and ``window_size`` is the side length of the square
    neighbourhood summed around each pixel.
    """

    def __init__(self, k, window_size):
        """Validate and store the detector parameters.

        Raises:
            ValueError: if ``k`` is not one of the accepted values.
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('''invalid k value''')

    def __str__(self):
        """Return the configured ``k`` as a string."""
        return str(self.k)

    def detect(self, img_path):
        """Run Harris corner detection on the grayscale image at ``img_path``.

        Returns a tuple ``(annotated RGB image, corner list)`` where each
        corner entry is ``[x, y, response]``.
        """
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        # FIX: use the configured k instead of a hard-coded 0.04, which
        # silently ignored a configured k of 0.06.
        k = self.k
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Threshold on the Harris response; the 0.5 cut-off is tunable.
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 2_5_5)
        return color_img, corner_list

    # Backward-compatible alias for the obfuscated original method name.
    _lowercase = detect
if __name__ == "__main__":
    # FIX: the detector class in this module is named `A_`; the original
    # referenced an undefined `HarrisCorner` and an unbound `edge_detect`.
    edge_detect = A_(0.04, 3)
    # `_lowercase` is the module's corner-detection method binding.
    color_img, _corner_list = edge_detect._lowercase("path_to_image")
    cva.imwrite("detect.png", color_img)
| 273 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class __snake_case ( LayoutLMvaImageProcessor):
    """Deprecated feature-extractor shim.

    Kept only so old checkpoints/configs referencing the feature extractor
    keep loading; emits a deprecation warning and otherwise behaves exactly
    like the image processor it subclasses (imported above).
    """

    def __init__( self , *args , **kwargs ):
        """Warn about the deprecation, then defer to the parent constructor.

        FIX: the original passed the positional-args tuple as the warning
        category (and reused one name for *args/**kwargs, a SyntaxError);
        the category should be FutureWarning.
        """
        warnings.warn(
            '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use LayoutLMv2ImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 72 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Fetch the raw MP4 bytes for an Instagram Video/IGTV post.

    Resolves the direct media URL through the downloadgram API, then
    downloads and returns its content.

    FIX/consistency: the script's ``__main__`` block calls ``download_video``,
    which was never defined under that name in the obfuscated original.
    """
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    video_url = requests.get(base_url + url).json()[0]['''urls'''][0]['''src''']
    return requests.get(video_url).content


# Backward-compatible alias preserving the module's original binding.
__SCREAMING_SNAKE_CASE = download_video
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    # Call the module-level downloader by its always-present binding
    # (the original referenced a name that was never defined).
    with open(file_name, "wb") as fp:
        fp.write(__SCREAMING_SNAKE_CASE(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 273 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a =logging.get_logger(__name__)
# NOTE(review): the next assignment immediately rebinds `a`, clobbering the
# logger created above — presumably these were two distinct names (the logger
# and the pretrained-config archive map) before obfuscation; confirm upstream.
a ={
    """caidas/swin2sr-classicalsr-x2-64""": (
        """https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
    ),
}
class A_ ( PretrainedConfig ):
    """Configuration for a Swin2SR super-resolution model.

    NOTE(review): parameter and attribute names restored from the obfuscated
    original, whose ``__init__`` reused one parameter name for every argument
    (a SyntaxError) and whose class attributes were both bound to a single
    name; the base class is the ``PretrainedConfig`` imported above.
    """

    model_type = '''swin2sr'''
    # Map generic HF attribute names onto the Swin-specific ones.
    attribute_map = {
        '''hidden_size''': '''embed_dim''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self,
        image_size=6_4,
        patch_size=1,
        num_channels=3,
        embed_dim=1_8_0,
        depths=[6, 6, 6, 6, 6, 6],  # mutable default mirrors the upstream API
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        """Store the architecture hyper-parameters; extra kwargs go to the
        ``PretrainedConfig`` base."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # Total number of transformer stages, derived from `depths`.
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 73 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between ``fnc``, the x axis and the verticals
    x = x_start, x = x_end using ``steps`` trapezoids.

    FIX: in the obfuscated original every local was collapsed to one name and
    the loop read undefined variables (``xa``/``fxa``), so it raised
    NameError; this restores the standard two-point trapezoid accumulation.
    """
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximate this small segment of the curve as linear and solve
        # for the trapezoidal area.
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step: slide the window to the next segment.
        x1 = x2
        fx1 = fx2
    return area


# Backward-compatible alias for the obfuscated original name.
__SCREAMING_SNAKE_CASE = trapezoidal_area
if __name__ == "__main__":

    def f(x: float) -> float:
        """Demo integrand: f(x) = x^3 + x^2."""
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        # Call the module-level integrator by its always-present binding
        # (the original referenced names that were never defined).
        print(f"with {i} steps: {__SCREAMING_SNAKE_CASE(f, -5, 5, i)}")
        i *= 10
| 273 | 0 |
"""simple docstring"""
from __future__ import annotations
def _snake_case ( snake_case__ : float , snake_case__1 : float , snake_case__2 : float , ):
    """Solve the semiconductor mass-action law n * p = n_i**2 for the one
    missing quantity.

    Exactly one of (electron, hole, intrinsic) concentration must be 0 — that
    one is treated as the unknown and returned as ``(name, value)``.

    Raises:
        ValueError: if the zero-count is not exactly one, or any
            concentration is negative.
    """
    electron_conc, hole_conc, intrinsic_conc = snake_case__, snake_case__1, snake_case__2
    concentrations = (electron_conc, hole_conc, intrinsic_conc)
    # Guard clauses: validate before computing anything.
    if concentrations.count(0 ) != 1:
        raise ValueError('You cannot supply more or less than 2 values' )
    if electron_conc < 0:
        raise ValueError('Electron concentration cannot be negative in a semiconductor' )
    if hole_conc < 0:
        raise ValueError('Hole concentration cannot be negative in a semiconductor' )
    if intrinsic_conc < 0:
        raise ValueError(
            'Intrinsic concentration cannot be negative in a semiconductor' )
    if electron_conc == 0:
        return ("electron_conc", intrinsic_conc**2 / hole_conc)
    if hole_conc == 0:
        return ("hole_conc", intrinsic_conc**2 / electron_conc)
    if intrinsic_conc == 0:
        return ("intrinsic_conc", (electron_conc * hole_conc) ** 0.5)
    # Unreachable in practice (exactly one zero is enforced above); kept to
    # mirror the original contract.
    return (-1, -1)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__A : Dict = logging.get_logger(__name__)

# FIX/consistency: the original bound every table below to `__A`, repeatedly
# clobbering it, while the tokenizer class in this module reads them under
# the standard HF names restored here.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Checkpoint name -> hosted vocab / tokenizer file URLs.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input lengths for each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

# Per-checkpoint tokenizer init overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class A_ (PreTrainedTokenizerFast ):
    """Fast (Rust-backed) SqueezeBERT tokenizer (WordPiece, BERT-style).

    NOTE(review): parameter, attribute and method names restored from the
    obfuscated original, whose signatures reused one parameter name (a
    SyntaxError) and whose class attributes were all bound to a single name;
    the base class is the ``PreTrainedTokenizerFast`` imported above.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """Build the fast tokenizer and re-sync the backend normalizer if its
        saved state disagrees with the requested options."""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # Rebuild the backend normalizer so it matches the requested options.
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Return ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return 0s for the first segment (incl. specials) and 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend model files into ``save_directory``; returns the paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 273 | 0 |
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class __UpperCamelCase ( DiffusionPipeline ):
    """Minimal one-step pipeline used to exercise custom-pipeline loading.

    Runs a single UNet denoising step on random noise; the subtraction below
    cancels the scheduler output, so the result is deterministically an
    all-ones tensor regardless of the (random) input.
    """

    def __init__(self, unet, scheduler):
        """Register the two required modules.

        FIX: the obfuscated original reused one name for both parameters
        (a SyntaxError); names recovered from the `register_modules` kwargs.
        """
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        """Run one UNet + scheduler step and return an all-ones tensor."""
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample
        # `scheduler_output - scheduler_output` is exactly zero; only the
        # ones remain, which makes the output easy to assert on in tests.
        result = scheduler_output - scheduler_output + torch.ones_like(model_output)
        return result
| 75 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
# Maps T5X/Flax parameter-name fragments to their HF SwitchTransformers
# equivalents; consumed by `rename_keys` below, which reads it under this
# name (the original bound the table to `__A`, leaving that read undefined).
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}
# Keep the original (obfuscated) binding for backward compatibility.
__A = MOE_LAYER_NAME_MAPPING
def rename_keys(s_dict):
    """Rename a flat T5X/Flax state dict in place to HF SwitchTransformers
    parameter names and return it.

    Three passes: (1) ``layers_N`` -> ``block/N/layer`` plus encoder/decoder
    MLP sub-layer indices, (2) the ``MOE_LAYER_NAME_MAPPING`` substring
    substitutions, (3) split stacked expert weights into one entry per expert.

    FIX: the obfuscated original dropped the ``s_dict[new_key] = ...``
    assignment and printed a literal "nested fstring" placeholder instead of
    building the per-expert key.
    """
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r'''.*/layers_(\d+)'''
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r'''layers_(\d+)''', r'''block/\1/layer''', new_key)

        layer_to_block_of_layer = r'''(encoder|decoder)\/'''
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            # MLP sits in sub-layer 1 of encoder blocks, 2 of decoder blocks.
            if groups[0] == "encoder":
                new_key = re.sub(r'''/mlp/''', r'''/1/mlp/''', new_key)
                new_key = re.sub(r'''/pre_mlp_layer_norm/''', r'''/1/layer_norm/''', new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r'''/mlp/''', r'''/2/mlp/''', new_key)
                new_key = re.sub(r'''/pre_mlp_layer_norm/''', r'''/2/layer_norm/''', new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(F"""{key} -> {new_key}""")
        s_dict[new_key] = s_dict.pop(key)

    # Relative attention biases are stored transposed in T5X.
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked weight
    # tensor into one entry per expert, then drop the stacked key.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", F"""experts/expert_{idx}/""")] = expert_weihts[idx]
                print(F"""{key} -> {key.replace("expert/", F"experts/expert_{idx}/")}""")
            s_dict.pop(key)

    return s_dict
# Maps gin-config hyperparameter names onto SwitchTransformersConfig kwargs;
# consumed by `convert_gin_to_config` below, which reads it under this name
# (the original bound the table to `__A`, leaving that read undefined, and
# carried an unevaluable `Optional[int]` annotation).
GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}
# Keep the original (obfuscated) binding for backward compatibility.
__A = GIN_TO_CONFIG_MAPPING
def convert_gin_to_config(gin_file, num_experts):
    """Parse a T5X gin config file into a ``SwitchTransformersConfig``.

    Scans ``NAME = value`` lines for the hyperparameters listed in
    ``GIN_TO_CONFIG_MAPPING``, extracts the activation name, and injects
    ``num_experts``.

    FIX: the obfuscated original reused one name for both parameters (a
    SyntaxError) and never populated the kwargs dict it built the config from.
    """
    # The original conversion script uses the third-party `regex` package.
    import regex as re

    with open(gin_file, '''r''') as f:
        raw_gin = f.read()

    regex_match = re.findall(r'''(.*) = ([0-9.]*)''', raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            # Integers stay ints; anything with a dot becomes a float.
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if '''.''' in value else int(value)

    # The activation is written as e.g. `...activations = ('gelu',)`.
    activation = re.findall(r'''(.*activations) = \(\'(.*)\',\)''', raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Convert a T5X/Flax SwitchTransformers checkpoint to a saved PyTorch model.

    Loads the Flax params, builds the config (from ``gin_file`` when given,
    otherwise from a pretrained config name), renames the flattened keys,
    loads them into the PT model and saves it to ``pytorch_dump_path``.

    FIX: the obfuscated original reused one name for every parameter (a
    SyntaxError); names restored from the call sites and print messages.
    """
    print(F"""Loading flax weights from : {flax_checkpoint_path}""")
    # NOTE(review): mirrors this module's `from tax import checkpoints`
    # import — presumably the t5x checkpoints API; confirm the module name.
    flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params['''target''']
    flax_params = flatten_dict(flax_params, sep='''/''')
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep='''/''')
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    pt_model.save_pretrained(pytorch_dump_path)


# Backward-compatible alias preserving the module's original binding.
__SCREAMING_SNAKE_CASE = convert_flax_checkpoint_to_pytorch
if __name__ == "__main__":
    # FIX: the original bound the parser and the parsed namespace to `__A`
    # while the code below read `parser` / `args`, and it accessed a
    # non-existent `switch_tax_checkpoint_path` attribute.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    # Call the module-level converter by its always-present binding.
    __SCREAMING_SNAKE_CASE(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 273 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
    """Unit tests for ``TvltProcessor``.

    Verifies that the processor correctly composes a ``TvltImageProcessor``
    and a ``TvltFeatureExtractor``: save/load round-trip, delegation of audio
    and image preprocessing, and the combined model input names.

    NOTE(review): method names restored from the obfuscated original, where
    every method shared one (name-mangled) identifier so only the last def
    survived, and the setup values were bound to locals instead of ``self``.
    """

    def setUp(self):
        # FIX: the original bound these to locals, so the `self.checkpoint` /
        # `self.tmpdirname` reads below raised AttributeError.
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        """Fresh image processor from the test checkpoint."""
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        """Fresh audio feature extractor from the test checkpoint."""
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        """Saving then reloading keeps both sub-processors' types."""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        """Audio inputs are delegated unchanged to the feature extractor."""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([1_2000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        """Image inputs are delegated unchanged to the image processor."""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        """Joint audio+image call yields all four expected keys; empty call raises."""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([1_2000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        """Processor input names are the concatenation of its sub-processors'."""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''prompt''']
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
if "image" in inputs:
UpperCAmelCase = inputs['''image''']
else:
UpperCAmelCase = None
if "mask_image" in inputs:
UpperCAmelCase = inputs['''mask_image''']
else:
UpperCAmelCase = None
if "original_image" in inputs:
UpperCAmelCase = inputs['''original_image''']
else:
UpperCAmelCase = None
UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(_A )
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
def _lowercase ( self ):
    """Round-trip the pipeline through save_pretrained/from_pretrained and
    check the outputs stay numerically identical (atol 1e-4).

    NOTE(review): the obfuscation replaced every assignment target with the
    throwaway name ``UpperCAmelCase`` while later lines read ``pipe``,
    ``inputs``, ``output``, ``pipe_loaded`` etc., and passes a bare ``_A``
    (presumably the target torch device / disable flag) that is not defined
    in this scope -- restore the original bindings before running.
    """
    UpperCAmelCase = self.get_dummy_components()
    UpperCAmelCase = self.pipeline_class(**_A )
    pipe.to(_A )
    pipe.set_progress_bar_config(disable=_A )
    UpperCAmelCase = self.get_dummy_inputs(_A )
    UpperCAmelCase = pipe(**_A )[0]
    with tempfile.TemporaryDirectory() as tmpdir:
        pipe.save_pretrained(_A )
        UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
        pipe_loaded.to(_A )
        pipe_loaded.set_progress_bar_config(disable=_A )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
    UpperCAmelCase = self.get_dummy_inputs(_A )
    UpperCAmelCase = pipe_loaded(**_A )[0]
    # Outputs before and after serialization must match closely.
    UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
    self.assertLess(_A , 1E-4 )
| 273 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase : int = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Any = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 77 |
from __future__ import annotations
from collections import namedtuple
def __SCREAMING_SNAKE_CASE(voltage: float, current: float, power: float) -> tuple:
    """Solve the electrical power equation P = V * I for the one missing value.

    Exactly one of the three arguments must be 0; that quantity is computed
    from the other two and returned as a ``result(name, value)`` namedtuple.

    Raises:
        ValueError: if not exactly one argument is 0, or if power is negative.

    The original signature repeated one obfuscated parameter name three times
    (a SyntaxError) while the body already used voltage/current/power.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        # Round to 2 decimals; abs() keeps the magnitude for signed inputs.
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 273 | 0 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def _lowerCAmelCase(lowercase_):
    """Return True iff the undirected graph is bipartite.

    The graph is an adjacency list keyed/indexed by 0..n-1.  Vertices are
    2-colored by DFS, then every edge is checked for equal endpoint colors.

    The original body had a duplicate-parameter inner ``dfs`` (SyntaxError)
    and referenced undefined names (``graph``, ``visited``, ``color``).
    """
    visited = [False] * len(lowercase_)
    color = [-1] * len(lowercase_)

    def dfs(v, c):
        # Color v with c and neighbours with the opposite color.
        visited[v] = True
        color[v] = c
        for u in lowercase_[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Color every connected component.
    for i in range(len(lowercase_)):
        if not visited[i]:
            dfs(i, 0)
    # A graph is bipartite iff no edge joins two same-colored vertices.
    for i in range(len(lowercase_)):
        for j in lowercase_[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of the demo graph; the check runs at import time, as in the
# original.  (The original printed via undefined names `check_bipartite_dfs`
# and `graph`, raising NameError -- fixed to the names defined in this module.)
snake_case_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(_lowerCAmelCase(snake_case_))
| 78 |
"""Lazy import structure for the RoFormer model.

Standard Transformers pattern: declare submodule contents in
``_import_structure`` and defer the heavy imports via ``_LazyModule``.  The
original version rebound a single obfuscated name for every list (losing all
entries) and referenced an undefined ``_import_structure`` at the end.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule name -> public names it provides; consumed by _LazyModule below.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

# Each optional backend gates its own submodule entry.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def __lowercase(num) -> str:
    """Convert an integer to its binary string representation ("0b..."/"-0b...").

    Raises:
        TypeError: for float or str input (matching int-only semantics).

    Fixes to the obfuscated original: ``isinstance(x, x)`` type checks (the
    second argument must be a type), the body referencing undefined ``num``
    while the parameter shadowed the function name, and the join using the
    loop counter instead of each bit.
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    # Collect bits most-significant first by inserting at the front.
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 79 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __SCREAMING_SNAKE_CASE(orig_key):
    """Translate one YOSO checkpoint key into Transformers naming.

    Applies a sequence of substring replacements (layer-norm names,
    transformer layer indices, attention projections, feed-forward and MLM
    head names) and prefixes non-classifier keys with ``yoso.``.

    The obfuscated original read an undefined name ``orig_key`` while the
    parameter had a throwaway name, and discarded every replacement result.
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        # "transformer_<n>" -> "encoder.layer.<n>"
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        # Everything outside the MLM head lives under the base model.
        orig_key = "yoso." + orig_key
    return orig_key
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
    """Rewrite a raw YOSO state dict into Transformers layout.

    NOTE(review): both parameters share one name, which is a SyntaxError
    (duplicate argument); the body suggests the intended signature is
    ``(max_position_embeddings, orig_state_dict)`` -- confirm against the
    original conversion script.
    NOTE(review): the loop assigns popped values to a throwaway name instead
    of re-inserting them under a renamed key (and reads undefined names
    ``key``/``val``), so the key-renaming step appears to have been lost.
    """
    for key in orig_state_dict.copy().keys():
        UpperCAmelCase = orig_state_dict.pop(UpperCamelCase__ )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            UpperCAmelCase = val
    # Tie the decoder bias and build position ids (offset by 2, RoBERTa-style).
    UpperCAmelCase = orig_state_dict['''cls.predictions.decoder.bias''']
    UpperCAmelCase = torch.arange(UpperCamelCase__ ).expand((1, -1) ) + 2
    return orig_state_dict
def __SCREAMING_SNAKE_CASE(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Convert a raw YOSO checkpoint into a Transformers ``YosoForMaskedLM``.

    Loads the torch checkpoint, rewrites its state dict, loads it into a
    freshly built model and saves the result with ``save_pretrained``.

    Fix: the original signature repeated one obfuscated parameter name three
    times (a SyntaxError); distinct names are restored from the argparse
    options below.
    NOTE(review): ``convert_checkpoint_helper`` is not defined under that
    name in this module (the state-dict helper above was renamed by the
    obfuscation) -- restore a distinct name for it.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    # load_state_dict reports missing/unexpected keys; print for inspection.
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A : List[str] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 273 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
a__ : Optional[Any] = logging.get_logger(__name__)

# The tokenizer class below references these constants by the names used in
# the Transformers ConvBERT module.  The obfuscated original bound all of
# them (and the logger) to the single name `a__`, shadowing each other and
# leaving the referenced names undefined.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

# Maximum input sizes, per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

# Default tokenizer kwargs, per pretrained checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class lowercase_(PreTrainedTokenizerFast):
    """Fast (Rust-backed) ConvBERT tokenizer, BERT-style WordPiece.

    Fixes to the obfuscated original: the base class was the module logger
    object (restored to the imported ``PreTrainedTokenizerFast``), every
    class attribute and method shared one name (shadowing each other), and
    method signatures repeated a single parameter name, which is a
    SyntaxError.  Parameter names are restored from the keyword arguments
    the body passes to ``super().__init__``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-sync the backend normalizer if the requested options differ
        # from the serialized tokenizer state.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (+ B [SEP] for sequence pairs)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """0s for the first segment (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        # NOTE(review): the obfuscated source used the first sequence for
        # both terms; restored to the standard BERT segment layout.
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Delegate vocab serialization to the backend tokenizer model."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 80 |
def __SCREAMING_SNAKE_CASE(base, exponent, modulo_value) -> int:
    """Modular exponentiation by recursive squaring.

    Quirk preserved from the original: ``exponent == 1`` returns ``base``
    unreduced (not ``base % modulo_value``).

    Fixes: the original signature repeated one parameter name three times
    (a SyntaxError) and recursed via the undefined name ``_modexpt``; the
    recursion is made self-contained with an inner helper so it is immune
    to later rebinding of the module-level name.
    """

    def _modexpt(base, exponent, modulo_value):
        if exponent == 1:
            return base
        if exponent % 2 == 0:
            x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
            return (x * x) % modulo_value
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value

    return _modexpt(base, exponent, modulo_value)
def __SCREAMING_SNAKE_CASE(base=1777, height=1855, digits=8) -> int:
    """Last ``digits`` digits of the hyperexponentiation base↑↑height
    (Project Euler 188 style): iterate result = base**result mod 10**digits.

    Fixes: the original signature repeated one parameter name three times
    (a SyntaxError) and called the undefined ``_modexpt``; a local helper
    with the same semantics (exponent == 1 returns base unreduced) is used.
    """
    modulo_value = 10**digits

    def _modexpt(base, exponent, modulo_value):
        if exponent == 1:
            return base
        if exponent % 2 == 0:
            x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
            return (x * x) % modulo_value
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value

    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, modulo_value)
    return result
if __name__ == "__main__":
print(F'{solution() = }')
| 273 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)


class __A(YolosImageProcessor):
    """Deprecated alias kept for backward compatibility; forwards everything
    to YolosImageProcessor and emits a deprecation warning on construction.

    Fixes: ``*args``/``**kwargs`` both carried one name (a SyntaxError), the
    base class name was undefined (the deprecation message itself says the
    replacement is ``YolosImageProcessor``, imported above), and the warning
    category was the class object.  Category assumed to be ``FutureWarning``
    per the usual deprecation-shim pattern -- confirm.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__A : Dict = logging.get_logger(__name__)
# NOTE(review): the next assignment rebinds ``__A``, immediately shadowing
# the logger above -- these were presumably two distinct module constants
# (a logger and the pretrained-config archive map); confirm intended names.
__A : str = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}
class A_(a_):
    """Configuration class for Longformer models.

    Fixes to the obfuscated original: ``__init__`` repeated the single
    parameter name ``_A`` eighteen times (a SyntaxError).  Parameter names
    are restored from the attribute assignments in the body and the
    ``pad_token_id`` keyword passed to ``super().__init__``; the defaults
    are kept exactly as in the original signature.
    """

    # Model-type identifier (the obfuscated attribute held "longformer").
    model_type = "longformer"

    def __init__(
        self,
        attention_window=512,
        sep_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        onnx_export=False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class A_(a_):
    """ONNX export configuration for Longformer.

    Fixes to the obfuscated original: duplicate parameter names in
    ``__init__``/``generate_dummy_inputs`` (SyntaxErrors) and five methods
    all sharing the name ``_lowercase`` (only the last survived).  Method
    names are restored from the ``super()`` calls visible in each body;
    assignment targets lost by the obfuscation are reconstructed -- confirm
    against the upstream Longformer ONNX config.
    """

    def __init__(self, config, task="default", patching_specs=None):
        super().__init__(config, task, patching_specs)
        # Flag the model config so tracing takes the ONNX-friendly path.
        config.onnx_export = True

    @property
    def inputs(self):
        """Dynamic-axis layout of the model inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self):
        outputs = super().outputs
        if self.task == "default":
            # NOTE(review): the original assignment target was lost; the
            # batch-axis entry presumably belongs to "pooler_output".
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self):
        """Absolute tolerance when validating exported outputs."""
        return 1e-4

    @property
    def default_onnx_opset(self):
        # Longformer needs at least opset 14.
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
| 273 | 0 |
import math
def _UpperCAmelCase(snake_case):
    """Return True iff ``snake_case`` is prime, by odd trial division.

    Raises AssertionError unless the input is a non-negative int.

    Fixes to the obfuscated original: ``isinstance(x, x)`` (the second
    argument must be a type) and references to the undefined name
    ``number`` while the parameter had another name.
    """
    assert isinstance(snake_case, int) and (
        snake_case >= 0
    ), "'number' must been an int and positive"

    if 1 < snake_case < 4:
        # 2 and 3 are primes
        return True
    elif snake_case < 2 or not snake_case % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(snake_case) + 1), 2)
    return not any(not snake_case % i for i in odd_numbers)
def _UpperCAmelCase(value, factor=1, **kwargs):
    """Return the first prime reached from ``factor * value``.

    Scans upward, or downward when ``desc=True`` is passed as a keyword.
    If the starting point is itself prime, the search restarts from the
    found prime + 1 with the factor reset to 1 (mirroring the original's
    recursive call), so the result always differs from the start.

    Fixes: the original signature repeated one parameter name three times
    (a SyntaxError) and called ``is_prime``/``next_prime``, neither of
    which exists under those names in this module.
    """

    def _is_prime(number):
        # Local trial-division primality test; the module-level helper
        # shares this function's (obfuscated) name, so it cannot be called
        # reliably from here.
        assert isinstance(number, int) and number >= 0, "'number' must been an int and positive"
        if 1 < number < 4:
            return True
        if number < 2 or number % 2 == 0:
            return False
        return not any(not number % i for i in range(3, int(math.sqrt(number) + 1), 2))

    step = -1 if ("desc" in kwargs and kwargs["desc"] is True) else 1
    while True:
        candidate = factor * value
        start = candidate
        while not _is_prime(candidate):
            candidate += step
        if candidate != start:
            return candidate
        # Starting value was already prime: search again from prime + 1
        # (factor resets to 1, matching the original recursive call).
        value = candidate + 1
        factor = 1
| 82 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A_(a_):
    """Unconditional image generation with the Karras et al. (2022)
    stochastic sampler (Algorithm 2), over a UNet + KarrasVe scheduler.

    Fixes to the obfuscated original: both ``__init__`` and ``__call__``
    repeated a single parameter name (SyntaxErrors), and most assignment
    targets were replaced by a throwaway name while later lines read the
    real ones.  Parameter names are restored from the keyword uses in the
    body (``unet=``, ``scheduler=``, ``output_type``, ``return_dict``).
    """

    # Components registered by __init__ (the obfuscated source held two
    # placeholder class attributes here) -- assumed type annotations.
    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size=1, num_inference_steps=50, generator=None, output_type="pil", return_dict=True, **kwargs):
        """Generate ``batch_size`` images; returns ImagePipelineOutput (or a
        plain tuple when ``return_dict=False``)."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output["derivative"],
                )
            sample = step_output.prev_sample

        # Map from [-1, 1] to [0, 1] and to HWC numpy for decoding.
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 273 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase__(ProcessorMixin):
    """Processor bundling a CLIP image processor with an XLM-RoBERTa
    tokenizer behind a single ``__call__``.

    Fixes to the obfuscated original: the base class name was undefined
    (restored to the imported ``ProcessorMixin``), the class attributes all
    carried the class's own name, method signatures repeated one parameter
    name (SyntaxErrors), and the merge of ``pixel_values`` into the text
    encoding was assigned to a throwaway variable.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        # Accept the deprecated kwarg as a fallback for image_processor.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or preprocess ``images``; when both are
        given, the pixel values are folded into the text encoding."""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 83 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__A : str = random.Random()
def __SCREAMING_SNAKE_CASE(shape, scale=1.0, rng=None, name=None):
    """Build a ``shape[0] x shape[1]`` nested list of random floats in
    [0, scale).

    ``name`` is accepted for API compatibility and unused.  When ``rng`` is
    None a shared generator is used so repeated calls draw from one stream.

    Fixes: the original signature repeated one parameter name four times
    (a SyntaxError) and read an undefined ``global_rng``; the shared
    generator is now cached on the function object itself so this block is
    self-contained.
    """
    if rng is None:
        rng = getattr(__SCREAMING_SNAKE_CASE, "_global_rng", None)
        if rng is None:
            rng = random.Random()
            __SCREAMING_SNAKE_CASE._global_rng = rng

    values = []
    for _ in range(shape[0]):
        row = []
        for _ in range(shape[1]):
            row.append(rng.random() * scale)
        values.append(row)

    return values
@require_torch
class A_(unittest.TestCase):
    """Config and input factory for the SpeechT5 feature-extraction tests.

    Fixes to the obfuscated original: ``__init__`` repeated the single
    parameter name ``_A`` (a SyntaxError) -- names restored from the body's
    attribute assignments and positional defaults -- and all helper methods
    shared the name ``_lowercase`` (shadowing each other).
    ``prepare_feat_extract_dict`` is restored from its call sites in the
    test class below; the two input-prep names follow the usual upstream
    naming -- NOTE(review): confirm which callers use them.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Per-example length increment used by the "increasing sizes" inputs.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        """Kwargs for constructing the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Waveform-style inputs; increasing lengths unless equal_length.

        NOTE(review): `floats_list` must be the module-level random-list
        helper; in this file it appears under an obfuscated name -- confirm.
        """
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        """Mel-spectrogram-style target inputs, same sizing scheme."""
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class A_ (a_ , unittest.TestCase ):
UpperCAmelCase__ = SpeechTaFeatureExtractor
def _lowercase ( self ):
    """Per-test setup: build the feature-extraction tester.

    NOTE(review): the result is bound to a throwaway name, but sibling
    tests read ``self.feat_extract_tester`` -- the assignment target (and
    likely the unittest hook name ``setUp``) appear to have been lost;
    confirm and restore.
    """
    UpperCAmelCase = SpeechTaFeatureExtractionTester(self )
def _lowercase ( self , _A ):
    """Assert the columns of ``_A`` are approximately zero-mean and
    unit-variance along axis 0 (tolerance 1e-3)."""
    self.assertTrue(np.all(np.mean(_A , axis=0 ) < 1E-3 ) )
    self.assertTrue(np.all(np.abs(np.var(_A , axis=0 ) - 1 ) < 1E-3 ) )
def _lowercase ( self ):
    """Feature extraction on unbatched vs batched inputs must agree.

    NOTE(review): assignment targets were replaced by a throwaway name while
    later lines read ``feat_extract``, ``speech_inputs``, ``np_speech_inputs``
    etc.; restore the original bindings.
    """
    UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    # create three inputs of length 800, 1000, and 1200
    UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
    UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
    # Test not batched input
    UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
    UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
    self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
    # Test batched
    UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
    UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
    for enc_seq_a, enc_seq_a in zip(_A , _A ):
        self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
def _lowercase ( self ):
    """Zero-mean/unit-variance normalization must hold for each padding
    strategy, and padded tails must stay ~zero.

    NOTE(review): assignment targets were lost to a throwaway name; restore
    the original bindings (``feat_extract``, ``speech_inputs``, etc.).
    """
    UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
    UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
    UpperCAmelCase = [None, 1_6_0_0, None]
    for max_length, padding in zip(_A , _A ):
        UpperCAmelCase = feat_extract(_A , padding=_A , max_length=_A , return_tensors='''np''' )
        UpperCAmelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
        self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
        self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
        self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
        self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowercase ( self ):
    """Same normalization check without return_tensors (list outputs).

    NOTE(review): assignment targets were lost to a throwaway name while the
    comprehension reads ``lengths`` and the loop reads ``feat_extract`` /
    ``processed``; restore the original bindings.
    """
    UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    UpperCAmelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
    UpperCAmelCase = [floats_list((1, x) )[0] for x in lengths]
    UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
    UpperCAmelCase = [None, 1_6_0_0, None]
    for max_length, padding in zip(_A , _A ):
        UpperCAmelCase = feat_extract(_A , max_length=_A , padding=_A )
        UpperCAmelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowercase ( self ):
    """Truncation to max_length=1000 with 'max_length' padding must keep the
    per-example normalization.

    NOTE(review): assignment targets were lost to a throwaway name; restore
    the original bindings.
    """
    UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
    UpperCAmelCase = feat_extract(
        _A , truncation=_A , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
    UpperCAmelCase = processed.input_values
    self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
    self._check_zero_mean_unit_variance(input_values[1] )
    self._check_zero_mean_unit_variance(input_values[2] )
def _lowercase ( self ):
    """'longest' padding: output width is min(max_length, longest input) when
    truncating, otherwise the longest input length.

    NOTE(review): assignment targets were lost to a throwaway name; restore
    the original bindings.
    """
    UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
    UpperCAmelCase = feat_extract(
        _A , truncation=_A , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
    UpperCAmelCase = processed.input_values
    self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
    self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
    self._check_zero_mean_unit_variance(input_values[2] )
    # make sure that if max_length < longest -> then pad to max_length
    self.assertTrue(input_values.shape == (3, 1_0_0_0) )
    UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
    UpperCAmelCase = feat_extract(
        _A , truncation=_A , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
    UpperCAmelCase = processed.input_values
    self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
    self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
    self._check_zero_mean_unit_variance(input_values[2] )
    # make sure that if max_length > longest -> then pad to longest
    self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
# Test feature size
UpperCAmelCase = feature_extractor(audio_target=_A , padding=_A , return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test batched
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
UpperCAmelCase = np.asarray(_A )
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_A ) == len(_A ) for x, y in zip(_A , processed_features[input_name] ) ) )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )[input_name]
UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_dict
UpperCAmelCase = True
UpperCAmelCase = self.feature_extraction_class(**_A )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = [len(_A ) for x in speech_inputs]
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _A )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_dict
UpperCAmelCase = True
UpperCAmelCase = self.feature_extraction_class(**_A )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = [len(_A ) for x in speech_inputs]
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = min(_A )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(
_A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _A )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def _lowercase ( self , _A ):
'''simple docstring'''
from datasets import load_dataset
UpperCAmelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
UpperCAmelCase = ds.sort('''id''' ).select(range(_A ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
UpperCAmelCase = self._load_datasamples(1 )
UpperCAmelCase = SpeechTaFeatureExtractor()
UpperCAmelCase = feature_extractor(_A , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , _A , atol=1E-6 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
UpperCAmelCase = self._load_datasamples(1 )
UpperCAmelCase = SpeechTaFeatureExtractor()
UpperCAmelCase = feature_extractor(audio_target=_A , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _A , atol=1E-4 ) )
| 273 | 0 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
# Module-level logger (conventional transformers boilerplate; the name was
# mangled from ``logger`` and is unused in this excerpt).
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
    r"""
    LeViT-style image processor.

    Pipeline (every step optional, defaults stored at construction time):
    resize the shortest edge (scaled by 256/224), center-crop, rescale pixel
    values, normalize, and pack the result into a ``pixel_values`` batch.

    Fixes over the mangled original: every method signature repeated the
    parameter name ``__A`` (a SyntaxError in Python), all four helpers shared
    one method name while ``preprocess`` calls ``self.resize`` /
    ``self.center_crop`` / ``self.rescale`` / ``self.normalize``, and the
    ``__init__`` attribute assignments were lost to a shared local name.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: "PILImageResampling" = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        """Store the default preprocessing configuration."""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: "PILImageResampling" = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image``. With ``{"shortest_edge": s}`` the target shortest
        edge is ``int(256 / 224 * s)`` (aspect ratio preserved); otherwise an
        explicit ``{"height", "width"}`` dict is required."""
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop ``image`` to ``size["height"] x size["width"]``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. ``1 / 255``)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` channel-wise: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional["PILImageResampling"] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured pipeline over one image or a batch.

        Every flag falls back to the value stored at ``__init__`` time when not
        given; the result is a ``BatchFeature`` with a ``pixel_values`` entry.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 84 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)

# Submodule -> exported-name map handed to _LazyModule below. Optional backends
# (sentencepiece tokenizer, speech feature extractor, TF and PyTorch models)
# are appended only when their dependencies are importable.
#
# Fixes over the mangled original: every piece of the structure was assigned to
# the same throwaway name ``__A`` (each overwriting the last), so the
# ``_import_structure`` referenced at the bottom was never defined, and the
# final line lost its ``sys.modules[__name__] = ...`` target.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # NOTE(review): the mangled file imported ``SpeechaText*`` names here while
    # exporting ``Speech2Text*`` strings above; the names below follow the
    # exported strings so static analysis and lazy loading agree.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    """Builds tiny DeBERTa configs/inputs and checks each task head's output shapes.

    Fixes over the mangled original: ``__init__`` repeated the parameter name
    ``a__`` (a SyntaxError), the body referenced real names (``parent``,
    ``batch_size``, ...) the signature never bound, every ``self.`` attribute
    assignment was lost to a shared local, all methods collided on one name,
    and the test-case class below calls these methods by their real names
    (``create_and_check_deberta_model`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a config, shaped by the tester fields."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        """Same config with a larger vocab so pipeline tests have headroom."""
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        # A task loss must be a scalar tensor.
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three calling conventions; only the last output is checked.
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(
            list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]
        )

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _snake_case ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Standard model-class test suite for DeBERTa.

    Fixes over the mangled original: the base list repeated an undefined name
    (``lowercase_``) instead of the imported mixins, every class attribute was
    assigned to one shared name (so the mixins' ``all_model_classes`` etc. were
    never set), and every test method shared one name so unittest discovered
    nothing.
    """

    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the mangled file only preserved the values (True, then four
    # False); the attribute names below follow the upstream DeBERTa suite —
    # confirm against the project's ModelTesterMixin.
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
    """Slow integration checks against the released microsoft/deberta-base weights.

    Fixes over the mangled original: both inputs were assigned to one shared
    local (so the attention mask overwrote the input ids) and the model was
    called with the undefined placeholder ``a__``.
    """

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), F'{output[:, 1:4, 1:4]}')
| 85 |
def __SCREAMING_SNAKE_CASE(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers ``n * (2n - 1)`` for n = 0..length-1.

    Raises:
        ValueError: if ``length`` is not a positive integer.

    Fixes over the original: the parameter was named ``UpperCamelCase__`` while
    the body validated an undefined ``length`` (NameError), and the type check
    was ``isinstance(x, x)`` instead of ``isinstance(length, int)``. The type is
    checked first so a non-int raises ValueError, not TypeError from ``<=``.
    """
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


# Public, star-importable name; the demo below called it but it was never defined.
hexagonal_numbers = __SCREAMING_SNAKE_CASE

if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
| 273 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds tiny ResNet configs/inputs and checks TF model output shapes.

    Fixes over the mangled original: ``__init__`` repeated the parameter name
    ``_SCREAMING_SNAKE_CASE`` (a SyntaxError), every ``self.`` attribute
    assignment was lost to a shared local, all methods collided on one
    (name-mangled) name, and the test class below instantiates
    ``TFResNetModelTester(self)`` which was never defined.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],  # noqa: B006 — read-only defaults, upstream convention
        depths=[1, 1, 2, 1],  # noqa: B006
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class A__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    """Common model-test suite for the TF ResNet implementations.

    Fixes over the mangled original: the base list repeated an undefined name
    (``_lowerCamelCase``) instead of the imported mixins, every class attribute
    was assigned to one shared name (so ``all_model_classes`` etc. were never
    set), every test method shared one name so unittest discovered nothing, and
    several bodies referenced the undefined placeholder ``_SCREAMING_SNAKE_CASE``.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    # NOTE(review): the mangled file only preserved five False values; the
    # attribute names follow the upstream TF ResNet suite — confirm against the
    # project's TFModelTesterMixin.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # ResNet has no text-modality config properties to verify.
        return

    @unittest.skip(reason='ResNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ResNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # One entry per stage plus the stem output.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict['output_hidden_states']
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration test below.

    Renamed from the obfuscated `__lowerCAmelCase`: the integration test calls
    `prepare_img()`, which was otherwise undefined.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class A__(unittest.TestCase):
    """Slow integration test: TF ResNet classification head on a real COCO image."""

    @cached_property
    def default_image_processor(self):
        # Only available when the vision extras are installed.
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1E-4))
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class A_(unittest.TestCase):
    """Single-node SageMaker training smoke test, parameterized over frameworks.

    Method names restored: the original named every method `_lowercase`, so
    `self.create_estimator()` (called in the test) and `job_name` (used in
    `save_results_as_csv`) resolved to nothing.
    """

    def setUp(self):
        """Copy the PyTorch training script into the test directory."""
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        # `env` is supplied by the `sm_env` pytest fixture.
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        """Build a single-node HuggingFace SageMaker estimator."""
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics next to the test artifacts."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator and run training
        estimator = self.create_estimator()
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 273 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
# NOTE(review): the archive map below rebinds the same name `UpperCamelCase`,
# so this logger is shadowed and unreachable afterwards — consider renaming.
UpperCamelCase = logging.get_logger(__name__)
# Canonical VisualBERT checkpoints mapped to their hosted config.json URLs.
UpperCamelCase = {
    '''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
    '''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
    '''uclanlp/visualbert-vqa-coco-pre''': (
        '''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
    ),
    '''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
    '''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
    '''uclanlp/visualbert-vcr-coco-pre''': (
        '''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
    ),
    '''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
    '''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
    '''uclanlp/visualbert-nlvr2-coco-pre''': (
        '''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
    )
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class snake_case_(PretrainedConfig):
    """Configuration class for VisualBERT models.

    Restored from the obfuscated version, whose `__init__` repeated the
    parameter name `lowercase_` (a SyntaxError) and assigned locals instead of
    instance attributes. Defaults mirror the upstream VisualBertConfig.
    """

    # Key used by the auto-class / serialization machinery.
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Token ids are forwarded to the base config; everything else is stored.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 87 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
# NOTE(review): the archive map below rebinds the same name `__A`, shadowing
# this logger — consider distinct names.
__A : int = logging.get_logger(__name__)
# Canonical BigBird checkpoints mapped to their hosted config.json URLs.
__A : Tuple = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_(PretrainedConfig):
    """Configuration class for BigBird (sparse-attention transformer).

    Restored from the obfuscated version, whose `__init__` repeated the
    parameter name `_A` (a SyntaxError) and whose base class `a_` was
    undefined. Defaults mirror the upstream BigBirdConfig.
    """

    model_type = '''big_bird'''

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        # Sparse-attention specific knobs.
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class A_(OnnxConfig):
    """ONNX export configuration for BigBird.

    The property must be named `inputs` — the ONNX exporter reads it from
    OnnxConfig subclasses; the obfuscated `_lowercase` name broke that, and
    the base class `a_` was undefined.
    """

    @property
    def inputs(self):
        # Multiple-choice models carry an extra `choice` axis between batch
        # and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ]
        )
| 273 | 0 |
def decimal_isolate(number, digit_amount):
    """Isolate the decimal (fractional) part of *number*.

    Renamed from the obfuscated `a__`: the `__main__` block calls
    `decimal_isolate`, which was otherwise a NameError.

    Args:
        number: value whose fractional part is wanted.
        digit_amount: when > 0, round the result to this many digits.

    Returns:
        The fractional part of `number`, optionally rounded.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
    # Demo: print the isolated decimal part for the same sample values,
    # in the same order, as the original hand-written calls.
    sample_cases = [
        (1.53, 0),
        (35.345, 1),
        (35.345, 2),
        (35.345, 3),
        (-14.789, 3),
        (0, 2),
        (-14.123, 1),
        (-14.123, 2),
        (-14.123, 3),
    ]
    for sample_number, sample_digits in sample_cases:
        print(decimal_isolate(sample_number, sample_digits))
| 88 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    """Builds tiny ViT configs/inputs and shared checks for the TF ViT tests.

    Renamed from the obfuscated `A_`: the test class below instantiates
    `TFViTModelTester(self)`, which was otherwise undefined. The obfuscated
    `__init__` also repeated the parameter name `_A` (a SyntaxError).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class A_(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API tests for TF ViT models.

    Restored from the obfuscated version: the bases `a_` were undefined and
    every method was named `_lowercase`, so unittest discovered no tests.
    """

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    # ViT has no resizable text embeddings, head masking or ONNX test support.
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained('''google/vit-base-patch16-224''')
        self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the slow integration test below.

    Renamed from the obfuscated `__SCREAMING_SNAKE_CASE`: the integration test
    calls `prepare_img()`, which was otherwise undefined.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_tf
@require_vision
class A_(unittest.TestCase):
    """Slow integration test: TF ViT classification head on a real COCO image."""

    @cached_property
    def default_image_processor(self):
        # Only available when the vision extras are installed; the test body
        # reads `self.default_image_processor`, so this name is load-bearing.
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''tf''')
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1E-4)
| 273 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __magic_name__(PipelineTool):
    """Tool that generates an English caption for an image with BLIP.

    Restored from the obfuscated version: the base `_UpperCamelCase` was
    undefined and all three pipeline hooks shared the name `__lowercase`, so
    only the last definition survived. PipelineTool drives the tool through
    `encode` -> `forward` -> `decode`.
    """

    default_checkpoint = 'Salesforce/blip-image-captioning-base'
    description = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    name = 'image_captioner'
    model_class = AutoModelForVisionaSeq

    inputs = ['image']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        # The image processor needs the vision extras.
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Preprocess the input image into model tensors."""
        return self.pre_processor(images=image, return_tensors='pt')

    def forward(self, inputs):
        """Generate caption token ids."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode generated ids into a clean caption string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 89 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class A_(unittest.TestCase):
    """Integration tests comparing XLM-R hidden states against fairseq values.

    Method names restored: both were named `_lowercase` (the second shadowed
    the first) and neither matched unittest's `test_*` discovery convention.
    """

    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-base''')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['''last_hidden_state'''].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-large''')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['''last_hidden_state'''].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 273 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make torch/cuDNN ops deterministic so the pixel-level assertions below are
# reproducible across runs.
enable_full_determinism()
class __lowerCAmelCase(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, dummy-weights) tests for KandinskyVaaControlnetPipeline.

    Restored from the obfuscated version: `get_dummy_inputs` repeated the
    parameter name `lowerCamelCase__` (a SyntaxError) and every property was
    named `lowercase_`, while bodies referenced the real names
    (`self.dummy_unet`, `self.block_out_channels_a`, ...).
    """

    pipeline_class = KandinskyVaaControlnetPipeline
    params = ['image_embeds', 'negative_image_embeds', 'hint']
    batch_params = ['image_embeds', 'negative_image_embeds', 'hint']
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 8,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image_hint',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        return UNetaDConditionModel(**model_kwargs)

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        return VQModel(**self.dummy_movq_kwargs)

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule='linear',
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type='epsilon',
            thresholding=False,
        )
        return {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        # `mps` does not support device-bound generators.
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 64,
            'width': 64,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }

    def test_kandinsky_controlnet(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __lowerCAmelCase(unittest.TestCase):
    """Slow GPU integration test for the Kandinsky 2.2 controlnet pipeline.

    Restored method names: `tearDown` (the obfuscated `lowercase_` never ran,
    leaking VRAM between tests) and a discoverable `test_*` method. The
    obfuscated `torch.floataa` is not a real attribute — restored to
    `torch.float16`.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy'
        )
        hint = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png'
        )
        # HWC uint8 -> normalized NCHW float hint tensor.
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = 'A robot, 4k photo'
        generator = torch.Generator(device='cuda').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt='',
        ).to_tuple()
        generator = torch.Generator(device='cuda').manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type='np',
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 90 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
# Module-level logger for the multiple-choice training script.
__A : Optional[int] = logging.getLogger(__name__)
@dataclass
class A_:
    """Arguments for which model/config/tokenizer we fine-tune.

    Field names restored: the obfuscated version named every field
    `UpperCAmelCase__`, so the later annotations overwrote the earlier ones
    and HfArgumentParser could not expose the real CLI flags; defaults
    `a_` were undefined.
    """

    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''},
    )
    model_revision: str = field(
        default='''main''',
        metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        },
    )
@dataclass
class A_ :
    """Arguments pertaining to what data we are going to input our model for training and eval.

    NOTE(review): attribute names and annotations appear mangled — every field is
    bound to the same identifier and `a_` is not defined here; the `help`
    metadata still documents each field's intent.
    """

    # The input training data file (csv or json).
    UpperCAmelCase__ = field(default=a_ , metadata={'''help''': '''The input training data file (a text file).'''} )
    # Optional evaluation data file.
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    # Whether to ignore cached preprocessed datasets.
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    # Number of worker processes for preprocessing.
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    # Maximum tokenized sequence length (longer truncated, shorter padded).
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. If passed, sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    # Whether to pad everything to max length (better for TPU) or pad per batch.
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''Whether to pad all samples to the maximum sentence length. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
                '''efficient on GPU but very bad for TPU.'''
            )
        } , )
    # Debug helper: cap the number of training examples.
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    # Debug helper: cap the number of evaluation examples.
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    def _lowercase ( self ):
        """Validate that train/validation files (when given) are csv or json.

        NOTE(review): the assignment target looks mangled — `extension` is
        presumably the intended name for the split result; confirm against
        the original run_swag.py.
        """
        if self.train_file is not None:
            UpperCAmelCase = self.train_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            UpperCAmelCase = self.validation_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class A_ :
    """Data collator that dynamically pads multiple-choice inputs.

    Flattens the (batch, num_choices) structure so the tokenizer can pad all
    choice sequences at once, then un-flattens back to
    (batch, num_choices, seq_len) and re-attaches the labels.

    NOTE(review): field names/annotations appear mangled (all bound to one
    identifier), and the __call__ body reads `features` / `self.tokenizer` /
    `self.padding` etc., which the visible names do not bind — confirm against
    the original file.
    """

    UpperCAmelCase__ = 42      # tokenizer (PreTrainedTokenizerBase)
    UpperCAmelCase__ = True    # padding strategy
    UpperCAmelCase__ = None    # max_length
    UpperCAmelCase__ = None    # pad_to_multiple_of
    def __call__( self , _A ):
        """Collate a list of multiple-choice features into one padded batch."""
        # The label column may be named either 'label' or 'labels'; pop it off
        # so only tokenizer-paddable keys remain.
        UpperCAmelCase = '''label''' if '''label''' in features[0].keys() else '''labels'''
        UpperCAmelCase = [feature.pop(_A ) for feature in features]
        UpperCAmelCase = len(_A )
        UpperCAmelCase = len(features[0]['''input_ids'''] )
        # One dict per (example, choice) pair so the tokenizer can pad them all together.
        UpperCAmelCase = [
            [{k: v[i] for k, v in feature.items()} for i in range(_A )] for feature in features
        ]
        UpperCAmelCase = list(chain(*_A ) )
        UpperCAmelCase = self.tokenizer.pad(
            _A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        # Un-flatten back to (batch_size, num_choices, seq_len)
        UpperCAmelCase = {k: v.view(_A , _A , -1 ) for k, v in batch.items()}
        # Add back labels
        # NOTE(review): `torch.intaa` is not a real dtype — presumably a mangled
        # `torch.int64`; confirm before running.
        UpperCAmelCase = torch.tensor(_A , dtype=torch.intaa )
        return batch
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
    """Fine-tune a multiple-choice model on SWAG (or user CSV/JSON data).

    Parses arguments, sets up logging/seed, loads and tokenizes the dataset,
    then trains and/or evaluates with the HF Trainer and pushes/creates the
    model card.

    NOTE(review): local names appear mangled — every assignment binds
    `UpperCAmelCase` while later statements read `parser`, `training_args`,
    `raw_datasets`, `trainer`, etc.; confirm against the original run_swag.py.
    """
    UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_swag''' , UpperCamelCase__ , UpperCamelCase__ )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    UpperCAmelCase = training_args.get_process_log_level()
    logger.setLevel(UpperCamelCase__ )
    datasets.utils.logging.set_verbosity(UpperCamelCase__ )
    transformers.utils.logging.set_verbosity(UpperCamelCase__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )
    # Detecting last checkpoint.
    UpperCAmelCase = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        UpperCAmelCase = {}
        if data_args.train_file is not None:
            UpperCAmelCase = data_args.train_file
        if data_args.validation_file is not None:
            UpperCAmelCase = data_args.validation_file
        UpperCAmelCase = data_args.train_file.split('''.''' )[-1]
        UpperCAmelCase = load_dataset(
            UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        UpperCAmelCase = load_dataset(
            '''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    UpperCAmelCase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    UpperCAmelCase = [F"""ending{i}""" for i in range(4 )]
    UpperCAmelCase = '''sent1'''
    UpperCAmelCase = '''sent2'''
    if data_args.max_seq_length is None:
        UpperCAmelCase = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                '''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
                ''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
                ''' override this default with `--block_size xxx`.''' )
            UpperCAmelCase = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
    # Preprocessing the datasets.
    def preprocess_function(UpperCamelCase__ ):
        # Repeat each context 4x (one per candidate ending), pair it with each
        # ending, flatten so the tokenizer sees a flat list, then regroup by 4.
        UpperCAmelCase = [[context] * 4 for context in examples[context_name]]
        UpperCAmelCase = examples[question_header_name]
        UpperCAmelCase = [
            [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(UpperCamelCase__ )
        ]
        # Flatten out
        UpperCAmelCase = list(chain(*UpperCamelCase__ ) )
        UpperCAmelCase = list(chain(*UpperCamelCase__ ) )
        # Tokenize
        UpperCAmelCase = tokenizer(
            UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('''--do_train requires a train dataset''' )
        UpperCAmelCase = raw_datasets['''train''']
        if data_args.max_train_samples is not None:
            UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
            UpperCAmelCase = train_dataset.select(range(UpperCamelCase__ ) )
        with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
            UpperCAmelCase = train_dataset.map(
                UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('''--do_eval requires a validation dataset''' )
        UpperCAmelCase = raw_datasets['''validation''']
        if data_args.max_eval_samples is not None:
            UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
            UpperCAmelCase = eval_dataset.select(range(UpperCamelCase__ ) )
        with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
            UpperCAmelCase = eval_dataset.map(
                UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    # Data collator
    UpperCAmelCase = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
    )
    # Metric
    def compute_metrics(UpperCamelCase__ ):
        # NOTE(review): `np.floataa` looks like a mangled `np.float32` — confirm.
        UpperCAmelCase , UpperCAmelCase = eval_predictions
        UpperCAmelCase = np.argmax(UpperCamelCase__ , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
    # Initialize our Trainer
    UpperCAmelCase = Trainer(
        model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
    # Training
    if training_args.do_train:
        UpperCAmelCase = None
        if training_args.resume_from_checkpoint is not None:
            UpperCAmelCase = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            UpperCAmelCase = last_checkpoint
        UpperCAmelCase = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
        trainer.save_model() # Saves the tokenizer too for easy upload
        UpperCAmelCase = train_result.metrics
        UpperCAmelCase = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
        )
        UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
        trainer.log_metrics('''train''' , UpperCamelCase__ )
        trainer.save_metrics('''train''' , UpperCamelCase__ )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        UpperCAmelCase = trainer.evaluate()
        UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
        UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
        trainer.log_metrics('''eval''' , UpperCamelCase__ )
        trainer.save_metrics('''eval''' , UpperCamelCase__ )
    # Model-card metadata for push_to_hub / create_model_card.
    UpperCAmelCase = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''multiple-choice''',
        '''dataset_tags''': '''swag''',
        '''dataset_args''': '''regular''',
        '''dataset''': '''SWAG''',
        '''language''': '''en''',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**UpperCamelCase__ )
    else:
        trainer.create_model_card(**UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int:
    """Spawn entry point for xla_spawn (TPUs); the process-index argument is unused.

    NOTE(review): calls `main()`, which is not defined under that name in this
    (apparently name-mangled) file — the training entry point above is also
    named `__SCREAMING_SNAKE_CASE`; confirm against the original script.
    """
    main()
if __name__ == "__main__":
    main()
| 273 | 0 |
"""simple docstring"""
from __future__ import annotations
def _A (__a , __a ) -> list[list[int]]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : list[list[int]] = []
create_all_state(1 , __a , __a , [] , __a )
return result
def _A (__a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
if level == 0:
total_list.append(current_list[:] )
return
for i in range(__a , total_number - level + 2 ):
current_list.append(__a )
create_all_state(i + 1 , __a , level - 1 , __a , __a )
current_list.pop()
def _A (__a ) -> None:
"""simple docstring"""
for i in total_list:
print(*__a )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = 4
UpperCAmelCase_ : int = 2
UpperCAmelCase_ : str = generate_all_combinations(n, k)
print_all_state(total_list)
| 91 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class A_ :
    """Model tester that builds tiny TFMBart configs and inputs for unit tests.

    NOTE(review): names look mangled — the class attributes all share one
    identifier and `__init__` repeats the parameter `_A` (a SyntaxError as
    written); the assignment targets below document the intended fields.
    """

    UpperCAmelCase__ = MBartConfig    # config_cls
    UpperCAmelCase__ = {}             # config_updates
    UpperCAmelCase__ = '''gelu'''     # hidden_act
    def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=False , _A=9_9 , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A=0.1 , _A=0.1 , _A=2_0 , _A=2 , _A=1 , _A=0 , ):
        """Store the (tiny) hyper-parameters used to build configs/inputs."""
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = seq_length
        UpperCAmelCase = is_training
        UpperCAmelCase = use_labels
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = eos_token_id
        UpperCAmelCase = pad_token_id
        UpperCAmelCase = bos_token_id
    def _lowercase ( self ):
        """Build a tiny MBartConfig plus a matching inputs dict."""
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        # Force the last position of every sequence to be the EOS token.
        UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        UpperCAmelCase = prepare_mbart_inputs_dict(_A , _A , _A )
        return config, inputs_dict
    def _lowercase ( self , _A , _A ):
        """Exercise the decoder with use_cache to check past-key-values handling.

        NOTE(review): this body appears truncated relative to the usual
        `check_decoder_model_past_large_inputs` implementation — it stops after
        extracting `past_key_values[1]` without any assertion.
        """
        UpperCAmelCase = TFMBartModel(config=_A ).get_decoder()
        UpperCAmelCase = inputs_dict['''input_ids''']
        # Use only the first example of the batch.
        UpperCAmelCase = input_ids[:1, :]
        UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
        UpperCAmelCase = inputs_dict['''head_mask''']
        UpperCAmelCase = 1
        # first forward pass
        UpperCAmelCase = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A )
        UpperCAmelCase , UpperCAmelCase = outputs.to_tuple()
        UpperCAmelCase = past_key_values[1]
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ) -> List[str]:
    """Build the standard MBart test-input dict, deriving any masks not supplied.

    NOTE(review): parameter names appear mangled (they all share one identifier,
    a SyntaxError as written); judging by the returned dict the intended
    parameters are (config, input_ids, decoder_input_ids, attention_mask,
    decoder_attention_mask, head_mask, decoder_head_mask, cross_attn_head_mask)
    — confirm against the original test file.
    """
    # Encoder attention mask: 1 wherever input_ids is not the pad token.
    if attention_mask is None:
        UpperCAmelCase = tf.cast(tf.math.not_equal(UpperCamelCase__ , config.pad_token_id ) , tf.inta )
    # Decoder mask: always attend to the first (start) token, then mask padding.
    if decoder_attention_mask is None:
        UpperCAmelCase = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    # Default head masks keep every attention head active.
    if head_mask is None:
        UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class A_ (a_ , a_ , unittest.TestCase ):
    """TF MBart model test suite wiring the model tester and config tester.

    NOTE(review): class-attribute names look mangled (all share one identifier)
    and the first `_lowercase` repeats the parameter `_A` (a SyntaxError as
    written); the intended attributes are the all-model / generative-model
    classes, the pipeline mapping, and three boolean test flags.
    """

    UpperCAmelCase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    UpperCAmelCase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    UpperCAmelCase__ = (
        {
            '''conversational''': TFMBartForConditionalGeneration,
            '''feature-extraction''': TFMBartModel,
            '''summarization''': TFMBartForConditionalGeneration,
            '''text2text-generation''': TFMBartForConditionalGeneration,
            '''translation''': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase__ = True
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    def _lowercase ( self , _A , _A , _A , _A , _A ):
        """Skip every pipeline test except feature extraction (known failures)."""
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def _lowercase ( self ):
        """Set up the model tester and the shared config tester."""
        UpperCAmelCase = TFMBartModelTester(self )
        UpperCAmelCase = ConfigTester(self , config_class=_A )
    def _lowercase ( self ):
        """Run the common MBartConfig sanity checks."""
        self.config_tester.run_common_tests()
    def _lowercase ( self ):
        """Check decoder past-key-values handling with large inputs."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_A )
@require_sentencepiece
@require_tokenizers
@require_tf
class A_ (unittest.TestCase ):
    """Slow integration test: en→ro translation with facebook/mbart-large-en-ro.

    NOTE(review): attribute/method names look mangled (several methods share
    the name `_lowercase`, and bodies reference `self.translate_src_text`,
    `self.src_text`, `self.expected_text`, `self.model_name`, which the
    visible names do not bind) — confirm against the original test file.
    """

    # Source sentence and its expected Romanian translation.
    UpperCAmelCase__ = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
    ]
    UpperCAmelCase__ = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
    ]
    UpperCAmelCase__ = '''facebook/mbart-large-en-ro'''
    @cached_property
    def _lowercase ( self ):
        """Tokenizer for the checkpoint (computed once, then cached)."""
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def _lowercase ( self ):
        """Seq2seq model for the checkpoint (computed once, then cached)."""
        UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    def _lowercase ( self , **_A ):
        """Translate the source text and assert it matches the expected output."""
        UpperCAmelCase = self.translate_src_text(**_A )
        self.assertListEqual(self.expected_text , _A )
    def _lowercase ( self , **_A ):
        """Tokenize, generate with beam search (2 beams) and decode to strings."""
        UpperCAmelCase = self.tokenizer(self.src_text , **_A , return_tensors='''tf''' )
        UpperCAmelCase = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        UpperCAmelCase = self.tokenizer.batch_decode(_A , skip_special_tokens=_A )
        return generated_words
    @slow
    def _lowercase ( self ):
        """End-to-end batch generation matches the reference translation."""
        self._assert_generated_batch_equal_expected()
| 273 | 0 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
UpperCamelCase__ = logging.getLogger(__name__)
def accuracy(outputs, labels):
    """Count correct predictions.

    Args:
        outputs: array of per-class scores, shape (n_examples, n_classes).
        labels: integer class labels, shape (n_examples,).

    Returns:
        The number of rows whose argmax over the class axis equals the label.

    The original definition repeated one mangled parameter name (a SyntaxError)
    while its body read `outputs`/`labels` and the caller used `accuracy` —
    this restores the consistent, runnable form.
    """
    preds = np.argmax(outputs, axis=1)
    return np.sum(preds == labels)
def load_rocstories_dataset(dataset_path):
    """Load a RocStories CSV.

    Args:
        dataset_path: path to the CSV file; the first row is a header.

    Returns:
        A list of (story, cont1, cont2, label) tuples, where `story` joins
        columns 1-4 with spaces, columns 5/6 are the two candidate
        continuations, and the last column is converted to a 0-based label.

    The original body applied `csv.reader`, `next` and `tqdm` to the *path*
    instead of the open file / reader and appended to an unbound name
    (`output`); the caller also used the name `load_rocstories_dataset`.
    """
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the header line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Turn tokenized RocStories examples into padded tensors for GPT double heads.

    For each example both candidate sequences are built as
    ``[start] story[:cap] [delimiter] cont[:cap] [clf]``.

    Args:
        encoded_datasets: iterable of datasets; each dataset is a list of
            (story, cont1, cont2, mc_label) token-id tuples.
        input_len: padded sequence length of the output tensors.
        cap_length: maximum number of story/continuation tokens kept.
        start_token / delimiter_token / clf_token: special-token ids.

    Returns:
        A list with one (input_ids, mc_token_ids, lm_labels, mc_labels) tuple
        of torch tensors per input dataset; positions past each sequence are
        0 in input_ids and -100 (ignored by the LM loss) in lm_labels.

    The original definition repeated one mangled parameter name (a SyntaxError)
    and had lost the index targets of its in-place assignments; this restores
    the canonical form, under the name the caller uses.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            # mc_token_ids points at the [clf] position of each candidate.
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def _a ( ):
    """Fine-tune OpenAI GPT (double-heads) on the RocStories cloze task.

    Parses CLI args, loads/encodes the dataset, optionally trains, then
    optionally evaluates, writing metrics to `<output_dir>/eval_results.txt`.

    NOTE(review): local names look mangled — every assignment binds
    `__lowerCAmelCase` while later statements read `parser`, `args`, `model`,
    `tokenizer`, `optimizer`, etc.; confirm against the original example script.
    """
    __lowerCAmelCase = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE_ , default="openai-gpt" , help="pretrained model name" )
    parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
    parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
    parser.add_argument(
        "--output_dir" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="The output directory where the model predictions and checkpoints will be written." , )
    parser.add_argument("--train_dataset" , type=SCREAMING_SNAKE_CASE_ , default="" )
    parser.add_argument("--eval_dataset" , type=SCREAMING_SNAKE_CASE_ , default="" )
    parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE_ , default=42 )
    parser.add_argument("--num_train_epochs" , type=SCREAMING_SNAKE_CASE_ , default=3 )
    parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE_ , default=8 )
    parser.add_argument("--eval_batch_size" , type=SCREAMING_SNAKE_CASE_ , default=16 )
    parser.add_argument("--adam_epsilon" , default=1E-8 , type=SCREAMING_SNAKE_CASE_ , help="Epsilon for Adam optimizer." )
    parser.add_argument("--max_grad_norm" , type=SCREAMING_SNAKE_CASE_ , default=1 )
    parser.add_argument(
        "--max_steps" , default=-1 , type=SCREAMING_SNAKE_CASE_ , help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ) , )
    parser.add_argument(
        "--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE_ , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
    parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE_ , default=6.25E-5 )
    parser.add_argument("--warmup_steps" , default=0 , type=SCREAMING_SNAKE_CASE_ , help="Linear warmup over warmup_steps." )
    parser.add_argument("--lr_schedule" , type=SCREAMING_SNAKE_CASE_ , default="warmup_linear" )
    parser.add_argument("--weight_decay" , type=SCREAMING_SNAKE_CASE_ , default=0.01 )
    parser.add_argument("--lm_coef" , type=SCREAMING_SNAKE_CASE_ , default=0.9 )
    parser.add_argument("--n_valid" , type=SCREAMING_SNAKE_CASE_ , default=3_74 )
    parser.add_argument("--server_ip" , type=SCREAMING_SNAKE_CASE_ , default="" , help="Can be used for distant debugging." )
    parser.add_argument("--server_port" , type=SCREAMING_SNAKE_CASE_ , default="" , help="Can be used for distant debugging." )
    __lowerCAmelCase = parser.parse_args()
    print(SCREAMING_SNAKE_CASE_ )
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE_ )
        ptvsd.wait_for_attach()
    # Seed every RNG for reproducibility.
    random.seed(args.seed )
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    torch.cuda.manual_seed_all(args.seed )
    __lowerCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    __lowerCAmelCase = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True." )
    if not os.path.exists(args.output_dir ):
        os.makedirs(args.output_dir )
    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    __lowerCAmelCase = ["_start_", "_delimiter_", "_classify_"]
    __lowerCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
    __lowerCAmelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
    __lowerCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE_ ) )
    model.to(SCREAMING_SNAKE_CASE_ )
    # Load and encode the datasets
    def tokenize_and_encode(SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
        # Recursively tokenize strings, pass ints through, map over containers.
        if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) )
        elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
            return obj
        return [tokenize_and_encode(SCREAMING_SNAKE_CASE_ ) for o in obj]
    logger.info("Encoding dataset..." )
    __lowerCAmelCase = load_rocstories_dataset(args.train_dataset )
    __lowerCAmelCase = load_rocstories_dataset(args.eval_dataset )
    __lowerCAmelCase = (train_dataset, eval_dataset)
    __lowerCAmelCase = tokenize_and_encode(SCREAMING_SNAKE_CASE_ )
    # Compute the max input length for the Transformer
    __lowerCAmelCase = model.config.n_positions // 2 - 2
    __lowerCAmelCase = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, conta, _ in dataset )
    __lowerCAmelCase = min(SCREAMING_SNAKE_CASE_ , model.config.n_positions ) # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    __lowerCAmelCase = pre_process_datasets(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
    __lowerCAmelCase , __lowerCAmelCase = tensor_datasets[0], tensor_datasets[1]
    __lowerCAmelCase = TensorDataset(*SCREAMING_SNAKE_CASE_ )
    __lowerCAmelCase = RandomSampler(SCREAMING_SNAKE_CASE_ )
    __lowerCAmelCase = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.train_batch_size )
    __lowerCAmelCase = TensorDataset(*SCREAMING_SNAKE_CASE_ )
    __lowerCAmelCase = SequentialSampler(SCREAMING_SNAKE_CASE_ )
    __lowerCAmelCase = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.eval_batch_size )
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            __lowerCAmelCase = args.max_steps
            __lowerCAmelCase = args.max_steps // (len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps) + 1
        else:
            __lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps * args.num_train_epochs
        __lowerCAmelCase = list(model.named_parameters() )
        # No weight decay on biases and LayerNorm parameters.
        __lowerCAmelCase = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        __lowerCAmelCase = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
        ]
        __lowerCAmelCase = AdamW(SCREAMING_SNAKE_CASE_ , lr=args.learning_rate , eps=args.adam_epsilon )
        __lowerCAmelCase = get_linear_schedule_with_warmup(
            SCREAMING_SNAKE_CASE_ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ )
    if args.do_train:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
            __lowerCAmelCase = 0
            __lowerCAmelCase = 0
            __lowerCAmelCase = tqdm(SCREAMING_SNAKE_CASE_ , desc="Training" )
            for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
                __lowerCAmelCase = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
                __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = batch
                __lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
                # Combined loss: lm_coef * language-modeling loss + multiple-choice loss.
                __lowerCAmelCase = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # Exponential moving average of the loss for the progress bar.
                __lowerCAmelCase = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                __lowerCAmelCase = "Training loss: {:.2e} lr: {:.2e}".format(SCREAMING_SNAKE_CASE_ , scheduler.get_lr()[0] )
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        __lowerCAmelCase = model.module if hasattr(SCREAMING_SNAKE_CASE_ , "module" ) else model # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        __lowerCAmelCase = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
        __lowerCAmelCase = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
        torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE_ )
        model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE_ )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        __lowerCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        __lowerCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(SCREAMING_SNAKE_CASE_ )
    if args.do_eval:
        model.eval()
        __lowerCAmelCase , __lowerCAmelCase = 0, 0
        __lowerCAmelCase , __lowerCAmelCase = 0, 0
        for batch in tqdm(SCREAMING_SNAKE_CASE_ , desc="Evaluating" ):
            __lowerCAmelCase = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = batch
            with torch.no_grad():
                __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model(
                    SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
            __lowerCAmelCase = mc_logits.detach().cpu().numpy()
            __lowerCAmelCase = mc_labels.to("cpu" ).numpy()
            __lowerCAmelCase = accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        __lowerCAmelCase = eval_loss / nb_eval_steps
        __lowerCAmelCase = eval_accuracy / nb_eval_examples
        __lowerCAmelCase = tr_loss / nb_tr_steps if args.do_train else None
        __lowerCAmelCase = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        __lowerCAmelCase = os.path.join(args.output_dir , "eval_results.txt" )
        with open(SCREAMING_SNAKE_CASE_ , "w" ) as writer:
            logger.info("***** Eval results *****" )
            for key in sorted(result.keys() ):
                logger.info(" %s = %s" , SCREAMING_SNAKE_CASE_ , str(result[key] ) )
                writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
    main()
| 92 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class A_ :
    """Builds tiny CTRL configurations and random inputs for the test suite below.

    NOTE(review): an automated rename has damaged this class — every method
    parameter is called ``_A`` (duplicate argument names are a SyntaxError in
    Python) and ``__init__`` binds bare ``UpperCAmelCase`` locals instead of the
    ``self.*`` attributes (``self.vocab_size`` etc.) that the other methods
    read.  The original parameter/attribute names must be restored before this
    class can run; the right-hand sides below preserve what they were.
    """

    def __init__( self , _A , _A=1_4 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=True , _A=9_9 , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=1_6 , _A=2 , _A=0.02 , _A=3 , _A=4 , _A=None , ):
        '''Record the hyper-parameters (batch size, vocab size, layer sizes, ...) for tiny test models.'''
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = seq_length
        UpperCAmelCase = is_training
        UpperCAmelCase = use_token_type_ids
        UpperCAmelCase = use_input_mask
        UpperCAmelCase = use_labels
        UpperCAmelCase = use_mc_token_ids
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = type_vocab_size
        UpperCAmelCase = type_sequence_label_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = num_labels
        UpperCAmelCase = num_choices
        UpperCAmelCase = scope
        # pad token is the last vocabulary id
        UpperCAmelCase = self.vocab_size - 1
    def _lowercase ( self ):
        '''Create a config plus random input ids / masks / labels for one test case.'''
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase = None
        if self.use_input_mask:
            UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase = None
        if self.use_token_type_ids:
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCAmelCase = None
        if self.use_mc_token_ids:
            UpperCAmelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        UpperCAmelCase = None
        UpperCAmelCase = None
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
        UpperCAmelCase = self.get_config()
        UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def _lowercase ( self ):
        '''Build a tiny CTRLConfig from the stored hyper-parameters.'''
        return CTRLConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
        '''Run a bare CTRLModel and check hidden-state / past-key-value shapes.'''
        UpperCAmelCase = CTRLModel(config=_A )
        model.to(_A )
        model.eval()
        model(_A , token_type_ids=_A , head_mask=_A )
        model(_A , token_type_ids=_A )
        UpperCAmelCase = model(_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
        '''Run the LM-head model with labels and check loss/logits shapes.'''
        UpperCAmelCase = CTRLLMHeadModel(_A )
        model.to(_A )
        model.eval()
        UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def _lowercase ( self ):
        '''Repackage prepare_config_and_inputs() output as (config, inputs_dict) for the common tests.'''
        UpperCAmelCase = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) ,
        ) = config_and_inputs
        UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
        return config, inputs_dict
    def _lowercase ( self , _A , _A , _A , _A , *_A ):
        '''Run CTRLForSequenceClassification and check the logits shape.'''
        UpperCAmelCase = self.num_labels
        UpperCAmelCase = CTRLForSequenceClassification(_A )
        model.to(_A )
        model.eval()
        UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class A_ (a_ , a_ , a_ , unittest.TestCase ):
    """Common / generation / pipeline test-suite wiring for the CTRL family.

    NOTE(review): names were mangled by an automated rename — the ``a_`` mixin
    bases, the ``_lowercase`` method names (test runners discover tests by
    name) and the ``UpperCAmelCase__`` class attributes (``all_model_classes``,
    ``pipeline_model_mapping`` etc.) must be restored for this suite to be
    collected and run.
    """

    # all model classes / generative model classes under test (when torch is available)
    UpperCAmelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    UpperCAmelCase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
    # pipeline task -> model class mapping used by the pipeline tests
    UpperCAmelCase__ = (
        {
            '''feature-extraction''': CTRLModel,
            '''text-classification''': CTRLForSequenceClassification,
            '''text-generation''': CTRLLMHeadModel,
            '''zero-shot''': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCAmelCase__ = True
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    def _lowercase ( self , _A , _A , _A , _A , _A ):
        '''Return True for pipeline test cases that are known not to work with CTRL.'''
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False
    def _lowercase ( self ):
        '''Instantiate the model tester and the shared config tester.'''
        UpperCAmelCase = CTRLModelTester(self )
        UpperCAmelCase = ConfigTester(self , config_class=_A , n_embd=3_7 )
    def _lowercase ( self ):
        '''Release as much GPU memory as possible between tests.'''
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def _lowercase ( self ):
        '''Run the shared configuration sanity checks.'''
        self.config_tester.run_common_tests()
    def _lowercase ( self ):
        '''Exercise the base CTRLModel.'''
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*_A )
    def _lowercase ( self ):
        '''Exercise the LM-head model.'''
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*_A )
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def _lowercase ( self ):
        '''Deliberately skipped: the common-test model is currently too large.'''
        pass
    @slow
    def _lowercase ( self ):
        '''Smoke-test loading a pretrained CTRL checkpoint.'''
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase = CTRLModel.from_pretrained(_A )
            self.assertIsNotNone(_A )
    @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
    def _lowercase ( self ):
        '''Deliberately skipped: CTRL does not support left padding.'''
        pass
@require_torch
class A_ (unittest.TestCase ):
    """Slow integration test: greedy generation from the pretrained ``ctrl`` checkpoint.

    NOTE(review): mangled like the classes above — ``_lowercase`` method names,
    the undefined ``_A`` arguments passed to ``model.to`` / ``torch.tensor`` /
    ``model.generate``, and the ``UpperCAmelCase`` locals all need their
    original names restored before this can run.
    """

    def _lowercase ( self ):
        '''Release as much GPU memory as possible between tests.'''
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    @slow
    def _lowercase ( self ):
        '''Generate greedily from "Legal the president is" and compare token ids to the known output.'''
        UpperCAmelCase = CTRLLMHeadModel.from_pretrained('''ctrl''' )
        model.to(_A )
        UpperCAmelCase = torch.tensor(
            [[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=_A ) # Legal the president is
        UpperCAmelCase = [
            1_1_8_5_9,
            0,
            1_6_1_1,
            8,
            5,
            1_5_0,
            2_6_4_4_9,
            2,
            1_9,
            3_4_8,
            4_6_9,
            3,
            2_5_9_5,
            4_8,
            2_0_7_4_0,
            2_4_6_5_3_3,
            2_4_6_5_3_3,
            1_9,
            3_0,
            5,
        ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        UpperCAmelCase = model.generate(_A , do_sample=_A )
        self.assertListEqual(output_ids[0].tolist() , _A )
| 273 | 0 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param(tax_checkpoint_path):
    """Load a T5X checkpoint and return its parameters as a flat dict.

    Restores the original function name: the converter below calls
    ``get_flax_param(...)``, but the def had been renamed to ``snake_case_``
    (and shadowed by the other defs sharing that name).

    Args:
        tax_checkpoint_path: path of the T5X checkpoint on disk.

    Returns:
        dict mapping tuple keys (nested parameter path) to numpy arrays.
    """
    flax_params = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    return flatten_dict(flax_params)
def rename_and_convert_flax_params(flax_dict):
    """Rename flattened T5X parameter keys to HF Pix2Struct names and convert to torch.

    Fixes the mangled original, whose locals were all bound to ``lowercase_``
    while the loop body read ``CONVERSION_MAPPING`` / ``DECODER_CONVERSION_MAPPING``
    / ``new_key`` / ``converted_dict`` — every call raised ``NameError``.

    Args:
        flax_dict: flat dict whose keys are tuples of path components
            (as produced by ``flatten_dict``) and whose values are numpy arrays.

    Returns:
        dict mapping HF-style parameter names to ``torch.Tensor``s
        (weights transposed except for embedding tables).
    """
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }
    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        # only parameters under the "target" collection are model weights
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format; linear weights are stored
    # transposed in flax, embeddings/embedders are not
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    """Convert a T5X Pix2Struct checkpoint to the HF format and save model + processor.

    Fixes the mangled original: all four parameters were named
    ``__SCREAMING_SNAKE_CASE`` (duplicate argument names → SyntaxError) and the
    def name did not match the ``convert_pixastruct_original_pytorch_checkpoint_to_hf``
    call in the ``__main__`` block.

    Args:
        t5x_checkpoint_path: path of the original T5X checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        use_large: build the "large" architecture instead of the base one.
        is_vqa: mark the configuration as a VQA variant.
    """
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)

    model = PixaStructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''')
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # NOTE(review): the assignment targets here were lost in an automated
        # rename; upstream sets the processor's patch budget and VQA flag —
        # confirm against the original conversion script.
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print('''Model saved in {}'''.format(pytorch_dump_folder_path))
if __name__ == "__main__":
    # CLI entry point for the Pix2Struct T5X -> HF conversion.
    # Fixes the mangled original: the parser was bound to ``_lowercase`` while
    # ``parser.add_argument``/``args`` were read, and the call used
    # ``args.tax_checkpoint_path`` although argparse stores the flag as
    # ``t5x_checkpoint_path``.
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    # help text fixed: it was a copy-paste of the --use_large help
    parser.add_argument("--is_vqa", action="store_true", help="Whether the model is a VQA model.")
    args = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 93 |
import cva
import numpy as np
class HarrisCorner:
    """Harris corner detector over a grayscale image.

    Fixes in this revision:
    * class renamed to ``HarrisCorner`` and the detection method to ``detect``
      to match the call sites in the ``__main__`` block;
    * ``__init__`` now actually stores ``self.k`` / ``self.window_size``
      (the originals were bound to throwaway locals, so ``detect`` and
      ``__str__`` crashed with ``AttributeError``);
    * ``detect`` uses the validated ``self.k`` instead of a hard-coded 0.04.
    """

    def __init__(self, k: float, window_size: int):
        """Store the Harris free parameter *k* (must be 0.04 or 0.06) and the window size.

        Raises:
            ValueError: if *k* is not one of the accepted values.
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('''invalid k value''')

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Detect corners in the grayscale image at *img_path*.

        Returns:
            tuple of (RGB image with detected corners painted red,
            list of ``[x, y, r]`` corner entries with their response value).
        """
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        # was hard-coded to 0.04, silently ignoring the constructor-validated value
        k = self.k
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 2_5_5)
        return color_img, corner_list
# Demo entry point: run Harris corner detection on a sample image and save the
# annotated result to detect.png.
if __name__ == "__main__":
    __A : Tuple = HarrisCorner(0.04, 3)
    __A , __A : List[Any] = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
| 273 | 0 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for BioGPT.

    Fixes the mangled original, which inherited from itself
    (``class _snake_case(_snake_case, ...)`` — a NameError at class-creation
    time) and gave every method the same name so only the last survived.
    """

    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a tiny BPE vocab + merges file into the test tmp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, '''w''') as fp:
            fp.write('''\n'''.join(merges))

    def get_input_output_texts(self, tokenizer):
        """Return a (raw input, expected detokenized output) pair for common tests."""
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenize with the tiny vocab and check both tokens and ids."""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """Check special-token layout for single sequences and pairs (needs the hub)."""
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')

        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_a = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
| 94 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve an Instagram/IGTV page URL via downloadgram and return the MP4 bytes.

    Fixes the mangled original: the def name did not match the ``download_video``
    call below, and the locals were all bound to one throwaway name so
    ``base_url`` was undefined.  Also adds request timeouts so the script
    cannot hang forever (requests has no default timeout).

    Args:
        url: the public Instagram video/IGTV page URL.

    Returns:
        Raw bytes of the downloaded MP4.
    """
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    video_url = requests.get(base_url + url, timeout=30).json()[0]['''urls'''][0]['''src''']
    return requests.get(video_url, timeout=30).content
if __name__ == "__main__":
    # Prompt for a URL, download the video and save it under a timestamped name.
    # Fixes the mangled original: both locals were bound to ``__A`` while the
    # body read ``url`` and ``file_name`` (NameError).
    url = input("Enter Video/IGTV url: ").strip()
    file_name = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(F'Done. Video saved to disk as {file_name}.')
| 273 | 0 |
# Sample directed graphs (adjacency lists) for exercising the Kosaraju helpers
# below.  Fixes the mangled original, where both dicts were bound to the same
# name so the first one was immediately shadowed and unreachable.
test_graph_1: dict[int, list[int]] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2: dict[int, list[int]] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """DFS from *vert*, returning vertices in order of DFS completion.

    Fixes the mangled original: all three parameters shared one name
    (SyntaxError) and the locals were bound to a throwaway name while the body
    read ``order``.  Restored names match the recursive call already present.

    Args:
        graph: adjacency list of the directed graph.
        vert: start vertex.
        visited: mutable visited flags, updated in place.

    Returns:
        Vertices reachable from *vert*, each appended after its subtree
        (finish order), so the overall list ends with *vert*.
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from *vert* in the reversed graph.

    Fixes the mangled original (duplicate parameter names, locals bound to a
    throwaway name while the body read ``component``).

    Args:
        reversed_graph: adjacency list of the transposed graph.
        vert: start vertex.
        visited: mutable visited flags, updated in place.

    Returns:
        The strongly connected component containing *vert* (in DFS pre-order).
    """
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: return the list of strongly connected components.

    Fixes the mangled original, whose locals (``visited``, ``reversed_graph``,
    ``order``, ``components_list``, ``vert``, ``component``) were all bound to
    one throwaway name while the body read the real names.

    Args:
        graph: adjacency list with vertices numbered 0..len(graph)-1.

    Returns:
        A list of components, each a list of vertex ids.
    """
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    # build the transposed graph
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    # first pass: DFS finish order on the original graph
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    # second pass: peel components off the transposed graph in reverse
    # finish order
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
| 95 |
from __future__ import annotations
from collections.abc import Callable
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 100 , ) -> float:
'''simple docstring'''
UpperCAmelCase = x_start
UpperCAmelCase = fnc(UpperCamelCase__ )
UpperCAmelCase = 0.0
for _ in range(UpperCamelCase__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
UpperCAmelCase = (x_end - x_start) / steps + xa
UpperCAmelCase = fnc(UpperCamelCase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
UpperCAmelCase = xa
UpperCAmelCase = fxa
return area
if __name__ == "__main__":
    # Demo: integrate x^3 + x^2 over [-5, 5] with an increasing step count.
    # Fixes the mangled original: the curve function's def name/parameter were
    # renamed away from ``f``/``x`` (the body read ``x``, the loop called
    # ``f``) and the counter was bound to a throwaway name instead of ``i``.
    def f(x):
        """Curve under test: f(x) = x^3 + x^2."""
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
        i *= 10
| 273 | 0 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(lowercase__=None, *, model_name=None):
    """Build a ``VideoMAEConfig`` for *model_name*, including label maps for fine-tuned variants.

    Fixes the mangled original: the def name did not match its
    ``get_videomae_config`` call site and every local was bound to
    ``_lowerCamelCase`` while ``config.*`` attributes were later read.

    Args:
        model_name: checkpoint name, e.g. ``"videomae-base-finetuned-kinetics"``.

    Raises:
        ValueError: for fine-tuned names naming an unknown dataset.
    """
    # keep positional compatibility with the original single-positional signature
    if model_name is None:
        model_name = lowercase__
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        # pre-training checkpoints do not use mean pooling
        # NOTE(review): assignment target reconstructed from upstream — confirm.
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = 'huggingface/label-files'
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = 'kinetics400-id2label.json'
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = 'something-something-v2-id2label.json'
        else:
            raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.')
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}

    return config
def set_architecture_configs(model_name, config):
    """Fill encoder/decoder sizes on *config* according to the size in *model_name*.

    Fixes the mangled original: both parameters shared the name ``lowercase__``
    (duplicate argument names → SyntaxError) and the assignment targets were
    lost.  Attribute names reconstructed from the upstream conversion script —
    "base" keeps the config defaults.

    Args:
        model_name: checkpoint name containing "small", "base", "large" or "huge".
        config: a ``VideoMAEConfig`` (or any object with these attributes),
            mutated in place.

    Raises:
        ValueError: if *model_name* names none of the known sizes.
    """
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    """Map one original VideoMAE checkpoint key to its HF parameter name.

    Fixes the mangled original, which assigned every replacement to a
    throwaway local so the conditions kept testing the unmodified ``name``
    and the function effectively did nothing useful.

    Args:
        name: a key from the original state dict.

    Returns:
        The corresponding HF-style key (replacements applied in sequence).
    """
    if "encoder." in name:
        name = name.replace('encoder.', '')
    if "cls_token" in name:
        name = name.replace('cls_token', 'videomae.embeddings.cls_token')
    if "decoder_pos_embed" in name:
        name = name.replace('decoder_pos_embed', 'decoder.decoder_pos_embed')
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('pos_embed', 'videomae.embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'videomae.embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'videomae.embeddings.norm')
    if "decoder.blocks" in name:
        name = name.replace('decoder.blocks', 'decoder.decoder_layers')
    if "blocks" in name:
        name = name.replace('blocks', 'videomae.encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name and "bias" not in name:
        name = name.replace('attn', 'attention.self')
    if "attn" in name:
        name = name.replace('attn', 'attention.attention')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "decoder_embed" in name:
        name = name.replace('decoder_embed', 'decoder.decoder_embed')
    if "decoder_norm" in name:
        name = name.replace('decoder_norm', 'decoder.decoder_norm')
    if "decoder_pred" in name:
        name = name.replace('decoder_pred', 'decoder.decoder_pred')
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('norm.weight', 'videomae.layernorm.weight')
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('norm.bias', 'videomae.layernorm.bias')
    if "head" in name and "decoder" not in name:
        name = name.replace('head', 'classifier')
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite an original VideoMAE state dict into HF layout, splitting fused qkv weights.

    Fixes the mangled original: both parameters shared the name ``lowercase__``
    (SyntaxError) and every assignment target was lost, so neither the key
    renames nor the qkv splits took effect.

    Args:
        orig_state_dict: state dict from the original checkpoint (mutated/reused).
        config: object providing ``hidden_size`` and ``decoder_hidden_size``.

    Returns:
        The converted state dict with HF-style keys.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith('encoder.'):
            key = key.replace('encoder.', '')

        if "qkv" in key:
            # fused query/key/value weight: split the first axis in three
            key_split = key.split('.')
            if key.startswith('decoder.blocks'):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = 'decoder.decoder_layers.'
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = 'videomae.encoder.layer.'
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    """Download the 16-frame "eating spaghetti" clip used to sanity-check converted models.

    Fixes the mangled original (locals bound to throwaway names while the
    body read ``file``/``video``; def name mismatched the ``prepare_video``
    call site).

    Returns:
        list of per-frame numpy arrays.
    """
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset')
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    """Download, convert and numerically verify a VideoMAE checkpoint, then optionally save/push.

    Fixes the mangled original: all four parameters shared the name
    ``lowercase__`` (SyntaxError) and every assignment target was lost
    (``model``, ``expected_shape``, ``expected_slice`` etc.).  Local names
    reconstructed from the values and branches still present.

    Args:
        checkpoint_url: Google-Drive URL of the original checkpoint.
        pytorch_dump_folder_path: output directory (or None to skip saving).
        model_name: one of the supported checkpoint names (see ``model_names``).
        push_to_hub: push the converted model to the hub when True.
    """
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = 'pytorch_model.bin'
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location='cpu')
    if "model" in files:
        state_dict = files['model']
    else:
        state_dict = files['module']
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors='pt')

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos', filename='bool_masked_pos.pt')
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        'videomae-small-finetuned-kinetics',
        'videomae-small-finetuned-ssv2',
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        'videomae-base-short',
        'videomae-base-short-finetuned-kinetics',
        'videomae-base',
        'videomae-base-finetuned-kinetics',
        'videomae-large',
        'videomae-large-finetuned-kinetics',
        'videomae-huge-finetuned-kinetics',
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        'videomae-base-short-ssv2',
        'videomae-base-short-finetuned-ssv2',
        'videomae-base-ssv2',
        'videomae-base-finetuned-ssv2',
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f'''Model name not supported. Should be one of {model_names}''')

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1E-4)
    else:
        print('Logits:', logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1E-4)
    print('Logits ok!')

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1E-4)
        print('Loss ok!')

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''')
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing to the hub...')
        model.push_to_hub(model_name, organization='nielsr')
if __name__ == "__main__":
    # CLI entry point for the VideoMAE conversion.
    # Fixes the mangled original: the parser and parsed namespace were both
    # bound to ``lowercase__`` while the body read ``parser`` and ``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
        type=str,
        help=(
            """URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
            """ download link."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
        type=str,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

# File names the fast tokenizer loads/saves. The class below references these
# constants by the canonical names; previously every one of them was bound to
# the same throwaway name `__A`, leaving the canonical names undefined.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Hub URLs for the pretrained vocab/tokenizer files of each checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

# Per-checkpoint default init kwargs.
PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class A_ (a_ ):
    r"""Fast SqueezeBERT tokenizer (backed by HuggingFace *tokenizers*' WordPiece).

    Fixes vs. the previous version: duplicate `_A` parameter names (a
    SyntaxError), locals assigned to `UpperCAmelCase` but read under their real
    names (`normalizer_state`, `files` — NameError), all three public methods
    sharing the name `_lowercase` (shadowing), and class attributes all named
    `UpperCAmelCase__` (the tokenizer base class reads `vocab_files_names` etc.).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-configure the backend normalizer if the requested options differ
        # from those serialized in tokenizer.json.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] around one sequence (or a [SEP]-joined pair)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return token-type ids: 0 for the first sequence (with its specials),
        1 for the optional second sequence (with its trailing [SEP])."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the WordPiece vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 273 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    """Convert a TensorFlow MobileBERT checkpoint into a PyTorch state dict on disk.

    The previous definition had three parameters all named `__a` (a
    SyntaxError) and was named `a`, while the `__main__` block calls
    `convert_tf_checkpoint_to_pytorch`.
    """
    # Initialise the PyTorch model from the JSON config
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Fix: parser/args were bound to `__snake_case` while read as `parser`/`args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--mobilebert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained MobileBERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
# T5X -> SwitchTransformers key fragments. `rename_keys` below iterates this
# mapping by its canonical name; previously it was bound to `__A` (NameError).
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}
def rename_keys(s_dict):
    """Rename flat T5X parameter keys in ``s_dict`` (in place) to the HF
    SwitchTransformers layout, and return the dict.

    The previous version was garbled: every local was named ``UpperCAmelCase``
    while later lines read ``new_key``/``expert_weihts`` (NameError), and the
    def shared its name with two sibling functions.
    """
    # 1. in HF T5, block.{x}.layer.{y} corresponds to layers_{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_pattern = R".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_pattern, key):
            new_key = re.sub(R"layers_(\d+)", R"block/\1/layer", new_key)

        encoder_decoder_pattern = R"(encoder|decoder)\/"
        if re.match(encoder_decoder_pattern, key):
            groups = re.match(encoder_decoder_pattern, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R"/mlp/", R"/1/mlp/", new_key)
                new_key = re.sub(R"/pre_mlp_layer_norm/", R"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(R"/mlp/", R"/2/mlp/", new_key)
                new_key = re.sub(R"/pre_mlp_layer_norm/", R"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(F"""{key} -> {new_key}""")
        s_dict[new_key] = s_dict.pop(key)

    # NOTE(review): the relative attention bias appears to be stored transposed
    # in the source checkpoint — confirm against the upstream conversion script.
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert weights
    # into one entry per expert.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                new_expert_key = key.replace("expert/", f"experts/expert_{idx}/")
                s_dict[new_expert_key] = expert_weights[idx]
                print(F"""{key} -> {new_expert_key}""")
            s_dict.pop(key)

    return s_dict
# gin-config parameter name -> SwitchTransformersConfig kwarg. Read by
# `convert_gin_to_config`; previously bound to `__A` (NameError at use site).
GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    """Parse a T5X gin config file into a ``SwitchTransformersConfig``.

    Previously garbled: every local was ``UpperCAmelCase`` and the def shared
    its name with two sibling functions while callers use this canonical name.
    """
    # NOTE(review): the original script deliberately shadows the module-level
    # `re` with the third-party `regex` package here.
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(R"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            # Floats contain a dot; everything else is an int.
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(R"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8):
    """Load a flax/T5X SwitchTransformers checkpoint, rename its parameters and
    save the equivalent PyTorch model to ``pytorch_dump_path``.

    Renamed from a shadowed ``__SCREAMING_SNAKE_CASE`` def to the canonical
    name the ``__main__`` block calls; garbled locals restored.
    """
    print(F"""Loading flax weights from : {flax_checkpoint_path}""")
    flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # Fix: parser/args were bound to `__A` while read as `parser`/`args`, and
    # the attribute `switch_tax_checkpoint_path` did not match the declared
    # `--switch_t5x_checkpoint_path` option.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 273 | 0 |
"""simple docstring"""
import functools
def a_(worda, wordb):
    """Return the Levenshtein edit distance between ``worda`` and ``wordb``
    (insertions, deletions and substitutions each cost 1), computed top-down
    with memoized recursion.

    Fixes: both parameters were named ``lowerCamelCase`` (a SyntaxError) and
    the inner closure read names (``len_worda`` etc.) that were never bound.

    >>> a_("intention", "execution")
    5
    >>> a_("", "")
    0
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 98 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
    """Mixin with shared helpers for the DeepFloyd IF pipeline tests: tiny
    dummy-component factories plus save/load regression checks.

    NOTE(review): in the previous version all four methods were named
    ``_lowercase`` (so only the last survived) and many locals/arguments were
    replaced by undefined names (``_A``, ``prompt_embeds``); the names below
    are reconstructed from context — confirm against the upstream test mixin.
    """

    def _get_dummy_components(self):
        """Build tiny stage-1 (text-to-image) pipeline components."""
        torch.manual_seed(0)
        text_encoder = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,  # NOTE(review): was the undefined `_A`; True per upstream
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        """Build tiny super-resolution (stage-2) pipeline components."""
        torch.manual_seed(0)
        text_encoder = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,  # stage-2 also receives the upscaled image channels
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,  # NOTE(review): was the undefined `_A`; True per upstream
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        """Save/load round-trip with every optional component set to None; the
        reloaded pipeline must keep them None and reproduce the output."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                F"""`{optional_component}` did not stay set to None after loading.""",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1E-4)

    def _test_save_load_local(self):
        """Plain save/load round-trip must reproduce the pipeline output."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1E-4)
| 273 | 0 |
import os
import pytest
from attr import dataclass
# Default AWS region for the SageMaker test jobs.
# NOTE(review): the binding name `lowercase` looks mangled; nothing in this
# chunk reads it — confirm the intended constant name against its consumers.
lowercase : Any = """us-east-1""" # defaults region
@dataclass
class A__ :
    """Per-framework configuration for the SageMaker integration tests.

    Fixes vs. the previous version: all four fields were annotated as ``__A``
    (each shadowing the last) while the class body read the undefined name
    ``hyperparameters`` and the properties read ``self.framework``; all four
    properties were also named ``__lowercase`` (shadowing each other).
    """

    framework: str
    role = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
    hyperparameters = {
        '''task_name''': '''mnli''',
        '''per_device_train_batch_size''': 16,
        '''per_device_eval_batch_size''': 16,
        '''do_train''': True,
        '''do_eval''': True,
        '''do_predict''': True,
        '''output_dir''': '''/opt/ml/model''',
        '''overwrite_output_dir''': True,
        '''max_steps''': 500,
        '''save_steps''': 5500,
    }
    # Same hyperparameters but with a longer schedule for distributed runs.
    distributed_hyperparameters = {**hyperparameters, '''max_steps''': 1000}

    @property
    def metric_definitions(self) -> list:
        # CloudWatch metric regexes differ between the PT and TF training logs.
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        # NOTE(review): "transfromers" typo kept verbatim — it is a runtime
        # job-name string that existing infrastructure may match on.
        return F'{self.framework}-transfromers-test'

    @property
    def test_path(self) -> str:
        return F'./tests/sagemaker/scripts/{self.framework}'

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def A_ ( request ):
    """Class-scoped fixture that builds the SageMaker test environment for the
    requesting test class.

    Fixes: the parameter must be named ``request`` so pytest injects its
    built-in ``request`` fixture (it was named ``A__``, shadowing the class
    above and leaving ``request`` undefined); the body also referenced the
    undefined ``SageMakerTestEnvironment`` — the dataclass in this file is
    ``A__`` — and the ``-> Dict`` annotation used an unimported name.
    """
    env = A__(framework=request.cls.framework )
| 99 |
from __future__ import annotations
from collections import namedtuple
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> tuple:
'''simple docstring'''
UpperCAmelCase = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 273 | 0 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=sys.maxsize):
__SCREAMING_SNAKE_CASE = """bilinear"""
__SCREAMING_SNAKE_CASE = max_size
__SCREAMING_SNAKE_CASE = short_edge_length
def __call__( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = []
for img in imgs:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = img.shape[:2]
# later: provide list and randomly choose index for resize
__SCREAMING_SNAKE_CASE = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
if size == 0:
return img
__SCREAMING_SNAKE_CASE = size * 1.0 / min(lowerCAmelCase__ , lowerCAmelCase__)
if h < w:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = size, scale * w
else:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = scale * h, size
if max(lowerCAmelCase__ , lowerCAmelCase__) > self.max_size:
__SCREAMING_SNAKE_CASE = self.max_size * 1.0 / max(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = newh * scale
__SCREAMING_SNAKE_CASE = neww * scale
__SCREAMING_SNAKE_CASE = int(neww + 0.5)
__SCREAMING_SNAKE_CASE = int(newh + 0.5)
if img.dtype == np.uinta:
__SCREAMING_SNAKE_CASE = Image.fromarray(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
__SCREAMING_SNAKE_CASE = np.asarray(lowerCAmelCase__)
else:
__SCREAMING_SNAKE_CASE = img.permute(2 , 0 , 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw
__SCREAMING_SNAKE_CASE = nn.functional.interpolate(
lowerCAmelCase__ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase__).squeeze(0)
img_augs.append(lowerCAmelCase__)
return img_augs
class Preprocess:
    """Batch pre-processing for the FRCNN visual backbone: resize, normalize,
    pad to a common size and report the (y, x) rescale factors.

    Renamed from the duplicated ``SCREAMING_SNAKE_CASE_`` so it no longer
    shadows ``ResizeShortestEdge``; attribute/method names restored (all
    attributes were previously assigned to one mangled name, and ``__call__``
    invoked ``self.pad`` although the method was named ``snake_case_``).
    """

    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        """Right/bottom-pad all CHW images to the batch max size; return the
        stacked batch and the original (h, w) sizes."""
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                # Normalize every entry to a float tensor on the target device.
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
assert torch.isfinite(UpperCamelCase_ ).all(), "Box tensor contains infinite or NaN!"
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = box_size
tensor[:, 0].clamp_(min=0 , max=UpperCamelCase_ )
tensor[:, 1].clamp_(min=0 , max=UpperCamelCase_ )
tensor[:, 2].clamp_(min=0 , max=UpperCamelCase_ )
tensor[:, 3].clamp_(min=0 , max=UpperCamelCase_ )
| 100 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: submodule name -> public names it exports. Previously
# every optional export list was bound to the same throwaway name `__A`
# (overwriting the last) while the final `_LazyModule(...)` call read the
# undefined `_import_structure`, and the lazy module was never installed into
# `sys.modules`.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 | 0 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
    """Builds configs and dummy inputs for the ViT model tests (the obfuscated
    counterpart of ``ViTModelTester``).

    NOTE(review): mechanical renaming broke this class — ``__init__`` declares
    every parameter as ``A__`` (duplicate argument names are a SyntaxError in
    Python), the ``self.<name> = <name>`` attribute assignments collapsed to a
    throwaway local ``lowercase``, and all helper methods share the name
    ``A__`` so later definitions clobber earlier ones in the class namespace.
    Kept byte-for-byte; restore names from the upstream ViT test file.
    """

    # NOTE(review): duplicate ``A__`` parameter names — SyntaxError as written.
    def __init__( self ,A__ ,A__=1_3 ,A__=3_0 ,A__=2 ,A__=3 ,A__=True ,A__=True ,A__=3_2 ,A__=5 ,A__=4 ,A__=3_7 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=1_0 ,A__=0.02 ,A__=None ,A__=2 ,):
        # NOTE(review): each line below was originally ``self.<attr> = <attr>``.
        lowercase = parent
        lowercase = batch_size
        lowercase = image_size
        lowercase = patch_size
        lowercase = num_channels
        lowercase = is_training
        lowercase = use_labels
        lowercase = hidden_size
        lowercase = num_hidden_layers
        lowercase = num_attention_heads
        lowercase = intermediate_size
        lowercase = hidden_act
        lowercase = hidden_dropout_prob
        lowercase = attention_probs_dropout_prob
        lowercase = type_sequence_label_size
        lowercase = initializer_range
        lowercase = scope
        lowercase = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowercase = (image_size // patch_size) ** 2
        lowercase = num_patches + 1
    # originally ``prepare_config_and_inputs``
    def A__ ( self):
        lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        lowercase = None
        if self.use_labels:
            lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
        lowercase = self.get_config()
        return config, pixel_values, labels
    # originally ``get_config``
    def A__ ( self):
        return ViTConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A__ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
    # originally ``create_and_check_model`` — duplicate ``A__`` parameters again
    def A__ ( self ,A__ ,A__ ,A__):
        lowercase = ViTModel(config=A__)
        model.to(A__)
        model.eval()
        lowercase = model(A__)
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
    # originally ``create_and_check_for_masked_image_modeling``
    def A__ ( self ,A__ ,A__ ,A__):
        lowercase = ViTForMaskedImageModeling(config=A__)
        model.to(A__)
        model.eval()
        lowercase = model(A__)
        self.parent.assertEqual(
            result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        lowercase = 1
        lowercase = ViTForMaskedImageModeling(A__)
        model.to(A__)
        model.eval()
        lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        lowercase = model(A__)
        self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size))
    # originally ``create_and_check_for_image_classification``
    def A__ ( self ,A__ ,A__ ,A__):
        lowercase = self.type_sequence_label_size
        lowercase = ViTForImageClassification(A__)
        model.to(A__)
        model.eval()
        lowercase = model(A__ ,labels=A__)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        lowercase = 1
        lowercase = ViTForImageClassification(A__)
        model.to(A__)
        model.eval()
        lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        lowercase = model(A__)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size))
    # originally ``prepare_config_and_inputs_for_common``
    def A__ ( self):
        lowercase = self.prepare_config_and_inputs()
        (
            (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) ,
        ) = config_and_inputs
        lowercase = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Model-level ViT test suite (obfuscated ``ViTModelTest``).

    NOTE(review): the two ``SCREAMING_SNAKE_CASE__`` bases (almost certainly
    ``ModelTesterMixin`` / ``PipelineTesterMixin`` imported above) are
    undefined under that name, ``ViTModelTester`` no longer resolves (the
    tester class above was renamed to ``lowercase``), and every test method is
    named ``A__`` so only the last definition survives.  Kept byte-for-byte.
    """
    # originally ``all_model_classes``
    lowercase_ : Tuple =(
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    # originally ``pipeline_model_mapping``
    lowercase_ : List[Any] =(
        {'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    # originally the fx_compatible / test_* boolean flags
    lowercase_ : Dict =True
    lowercase_ : Tuple =False
    lowercase_ : Any =False
    lowercase_ : str =False
    # originally ``setUp``
    def A__ ( self):
        lowercase = ViTModelTester(self)
        lowercase = ConfigTester(self ,config_class=A__ ,has_text_modality=A__ ,hidden_size=3_7)
    # originally ``test_config``
    def A__ ( self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def A__ ( self):
        pass
    # originally ``test_model_common_attributes``
    def A__ ( self):
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase = model_class(A__)
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module))
            lowercase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(A__ ,nn.Linear))
    # originally ``test_forward_signature``
    def A__ ( self):
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase = model_class(A__)
            lowercase = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase = [*signature.parameters.keys()]
            lowercase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,A__)
    # originally ``test_model``
    def A__ ( self):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__)
    # originally ``test_for_masked_image_modeling``
    def A__ ( self):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*A__)
    # originally ``test_for_image_classification``
    def A__ ( self):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A__)
    @slow
    def A__ ( self):
        # originally ``test_model_from_pretrained``
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase = ViTModel.from_pretrained(A__)
            self.assertIsNotNone(A__)
def UpperCamelCase ( ):
    """Load the standard COCO test image used by the slow integration tests.

    NOTE(review): the original body bound the opened image to one obfuscated
    local and then ``return image`` read an undefined name (a ``NameError``);
    the opened image is now returned directly.
    """
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
    """Slow integration tests against real ViT / DINO checkpoints (obfuscated
    ``ViTModelIntegrationTest``).

    NOTE(review): ``prepare_img`` was renamed to ``UpperCamelCase`` above, so
    the ``prepare_img()`` calls below no longer resolve, and all test methods
    share the name ``A__`` so only the last definition survives.  Kept
    byte-for-byte.
    """
    @cached_property
    def A__ ( self):
        # originally ``default_image_processor``
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''') if is_vision_available() else None
    @slow
    def A__ ( self):
        # originally ``test_inference_image_classification_head``
        lowercase = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''').to(A__)
        lowercase = self.default_image_processor
        lowercase = prepare_img()
        lowercase = image_processor(images=A__ ,return_tensors='''pt''').to(A__)
        # forward pass
        with torch.no_grad():
            lowercase = model(**A__)
        # verify the logits
        lowercase = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape ,A__)
        lowercase = torch.tensor([-0.2744, 0.8215, -0.0836]).to(A__)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A__ ,atol=1E-4))
    @slow
    def A__ ( self):
        # originally ``test_inference_interpolate_pos_encoding``
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        lowercase = ViTModel.from_pretrained('''facebook/dino-vits8''').to(A__)
        lowercase = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' ,size=4_8_0)
        lowercase = prepare_img()
        lowercase = image_processor(images=A__ ,return_tensors='''pt''')
        lowercase = inputs.pixel_values.to(A__)
        # forward pass
        with torch.no_grad():
            lowercase = model(A__ ,interpolate_pos_encoding=A__)
        # verify the logits
        lowercase = torch.Size((1, 3_6_0_1, 3_8_4))
        self.assertEqual(outputs.last_hidden_state.shape ,A__)
        lowercase = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(A__)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,A__ ,atol=1E-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def A__ ( self):
        # originally ``test_inference_fp16``
        lowercase = ViTModel.from_pretrained('''facebook/dino-vits8''' ,torch_dtype=torch.floataa ,device_map='''auto''')
        lowercase = self.default_image_processor
        lowercase = prepare_img()
        lowercase = image_processor(images=A__ ,return_tensors='''pt''')
        lowercase = inputs.pixel_values.to(A__)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            lowercase = model(A__)
| 101 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Union[str, Any]:
    """Map one key of the original YOSO checkpoint onto the Hugging Face
    ``YosoForMaskedLM`` naming scheme.

    NOTE(review): the original body read an undefined name ``orig_key`` (the
    parameter had been renamed) and discarded every ``replace`` result; the
    running key variable is restored here.  Rule order matters: the specific
    patterns (``norm1``/``norm2``, ``mha.attn``, ``ff1``/``ff2``,
    ``mlm_class``) must fire before their generic prefixes.
    """
    orig_key = UpperCamelCase__
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        # ``transformer_<n>`` becomes ``encoder.layer.<n>``
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    # everything that is not part of the MLM head lives under the ``yoso.`` prefix
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
# NOTE(review): kept byte-for-byte — the mechanical renaming left this helper
# unrunnable and its intent cannot be restored safely from this file alone:
#   * both parameters share the name ``UpperCamelCase__`` (a SyntaxError);
#   * ``orig_state_dict``/``val`` are read but never defined under those names;
#   * each popped value is bound to a throwaway local, so the (presumably
#     renamed) keys are never written back into the state dict;
#   * once every key has been popped, the two dict reads below would raise
#     ``KeyError``.
# The renaming helper it should call was itself renamed to the same
# ``__SCREAMING_SNAKE_CASE`` name and is shadowed; restore from the upstream
# YOSO conversion script.
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
    '''Rewrite an original YOSO ``state_dict`` in-place to HF naming, dropping
    pooler / sentence-classification weights and adding the decoder bias and
    position-id entries (see NOTE above — broken as written).'''
    for key in orig_state_dict.copy().keys():
        UpperCAmelCase = orig_state_dict.pop(UpperCamelCase__ )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            UpperCAmelCase = val
    UpperCAmelCase = orig_state_dict['''cls.predictions.decoder.bias''']
    UpperCAmelCase = torch.arange(UpperCamelCase__ ).expand((1, -1) ) + 2
    return orig_state_dict
def __SCREAMING_SNAKE_CASE ( checkpoint_path , yoso_config_file , pytorch_dump_path ):
    """Convert an original YOSO checkpoint file into a Hugging Face
    ``YosoForMaskedLM`` directory.

    NOTE(review): the signature previously declared three parameters with the
    same name (a SyntaxError); distinct names are restored from the argparse
    call at the bottom of the file.  ``convert_checkpoint_helper`` is not
    defined under that name in this mechanically renamed module — the helper
    above was renamed too; restore its original name upstream.
    """
    orig_state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model_state_dict''']
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    # load_state_dict returns the missing/unexpected-keys report; print it for review
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
    # NOTE(review): ``parser``/``args`` were renamed to ``__A`` and the
    # conversion entry point to ``__SCREAMING_SNAKE_CASE``, so the
    # ``parser.add_argument`` / ``parser.parse_args`` /
    # ``convert_yoso_checkpoint`` references below no longer resolve; restore
    # the upstream names.
    __A : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    __A : List[str] = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 273 | 0 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def lowercase ( _snake_case : np.ndarray ) ->np.ndarray:
    """Convert an RGB image of shape (H, W, 3) to grayscale (H, W) using the
    ITU-R 601-2 luma weights.

    NOTE(review): the body read an undefined name ``rgb`` and the channel
    unpacking targets were lost; both are restored via a local alias so the
    public parameter stays unchanged.
    """
    rgb = _snake_case
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def lowercase ( _snake_case : np.ndarray ) ->np.ndarray:
    """Threshold a grayscale image to a boolean mask: True where 127 < v <= 255.

    NOTE(review): the body read an undefined name ``gray`` (the parameter had
    been renamed); restored via a local alias so the public parameter stays
    unchanged.
    """
    gray = _snake_case
    return (gray > 127) & (gray <= 255)
def lowercase ( image : np.ndarray , kernel : np.ndarray ) ->np.ndarray:
    """Binary dilation of ``image`` by the structuring element ``kernel``.

    A pixel is set when any kernel-weighted neighbour in the padded image is
    non-zero.

    NOTE(review): the original declared both parameters as ``_snake_case``
    (duplicate argument names are a SyntaxError), copied the image into the
    padding without a target, and read an undefined ``summation`` while never
    writing ``output`` — all restored here.
    """
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image so the kernel can slide over the border pixels
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
    # read original image
    # NOTE(review): the three functions above were all renamed to ``lowercase``
    # and these constants to ``SCREAMING_SNAKE_CASE``, so ``lena_path`` /
    # ``lena`` / ``rgb_to_gray`` / ``gray_to_binary`` / ``dilation`` /
    # ``structuring_element`` / ``output`` / ``pil_img`` below no longer
    # resolve; restore the upstream names.
    SCREAMING_SNAKE_CASE : str = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
    SCREAMING_SNAKE_CASE : Optional[Any] = np.array(Image.open(lena_path))
    # kernel to be applied (a 3x3 "plus"-shaped structuring element)
    SCREAMING_SNAKE_CASE : List[Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    SCREAMING_SNAKE_CASE : Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    SCREAMING_SNAKE_CASE : Tuple = Image.fromarray(output).convert("""RGB""")
    pil_img.save("""result_dilation.png""")
| 102 |
def __SCREAMING_SNAKE_CASE ( base , exponent , modulo_value ) -> int:
    """Return ``base ** exponent % modulo_value`` by binary exponentiation.

    NOTE(review): the original declared all three parameters as
    ``UpperCamelCase__`` (duplicate argument names are a SyntaxError) and
    recursed through the undefined name ``_modexpt``; both are repaired.
    As in the original logic, an exponent of 1 returns ``base`` unreduced —
    callers in this module only pass bases smaller than the modulus.
    """
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        # square-and-reduce on the even half
        x = __SCREAMING_SNAKE_CASE(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * __SCREAMING_SNAKE_CASE(base , exponent - 1 , modulo_value )) % modulo_value
def __SCREAMING_SNAKE_CASE ( base = 1777 , height = 1855 , digits = 8 ) -> int:
    """Project Euler 188: the last ``digits`` digits of the hyperexponentiation
    (tetration) ``base ^^ height``.

    NOTE(review): the original declared all three parameters as
    ``UpperCamelCase__`` (duplicate argument names are a SyntaxError) and
    called the undefined name ``_modexpt``; the built-in three-argument
    ``pow`` performs the same modular exponentiation.
    """
    result = base
    for _ in range(1 , height ):
        result = pow(base , result , 10**digits )
    return result
if __name__ == "__main__":
    # NOTE(review): ``solution`` was renamed to ``__SCREAMING_SNAKE_CASE``
    # above, so this call no longer resolves; restore the original name.
    print(F'{solution() = }')
| 273 | 0 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
# Make the FSDP tests deterministic.
set_seed(42)
# NOTE(review): the four constants below were originally distinct names
# (model id, "fp16"/"bf16" markers, and the dtype list); mechanical renaming
# bound them all to ``A__`` — each line clobbers the previous one — and the
# last line reads the undefined names ``FPaa``/``BFaa``.
A__ : str = '''bert-base-cased'''
A__ : Optional[int] = '''fp16'''
A__ : Optional[int] = '''bf16'''
A__ : Any = [FPaa, BFaa]
@require_fsdp
@require_cuda
class __snake_case ( UpperCamelCase_ ):
    """Unit tests for ``FullyShardedDataParallelPlugin`` environment-variable
    parsing (obfuscated ``FSDPPluginIntegration``).

    NOTE(review): mechanical renaming broke this class — the base
    ``UpperCamelCase_`` is undefined here, every test method is named
    ``UpperCAmelCase__`` so later definitions clobber earlier ones,
    ``self.dist_env`` is never actually assigned (the setUp binding collapsed
    to a local ``lowerCAmelCase_``), and the bare ``A_`` references
    (originally the FSDP_* constant lists, env-dict entries and expected
    values) no longer resolve.  Kept byte-for-byte.
    """
    # originally ``setUp`` — built ``self.dist_env``
    def UpperCAmelCase__ ( self : str):
        super().setUp()
        lowerCAmelCase_ : int = dict(
            ACCELERATE_USE_FSDP='''true''' , MASTER_ADDR='''localhost''' , MASTER_PORT='''10999''' , RANK='''0''' , LOCAL_RANK='''0''' , WORLD_SIZE='''1''' , )
    # originally ``test_sharding_strategy``
    def UpperCAmelCase__ ( self : Any):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(A_):
            lowerCAmelCase_ : Optional[int] = self.dist_env.copy()
            lowerCAmelCase_ : Optional[Any] = F"""{i + 1}"""
            lowerCAmelCase_ : List[Any] = strategy
            with mockenv_context(**A_):
                lowerCAmelCase_ : List[Any] = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1))
    # originally ``test_backward_prefetch``
    def UpperCAmelCase__ ( self : Any):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(A_):
            lowerCAmelCase_ : Tuple = self.dist_env.copy()
            lowerCAmelCase_ : str = prefetch_policy
            with mockenv_context(**A_):
                lowerCAmelCase_ : List[str] = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1))
    # originally ``test_state_dict_type``
    def UpperCAmelCase__ ( self : int):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(A_):
            lowerCAmelCase_ : Optional[int] = self.dist_env.copy()
            lowerCAmelCase_ : str = state_dict_type
            with mockenv_context(**A_):
                lowerCAmelCase_ : Optional[Any] = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.ranka_only)
    # originally ``test_auto_wrap_policy``
    def UpperCAmelCase__ ( self : int):
        lowerCAmelCase_ : str = AutoModel.from_pretrained(A_)
        for policy in FSDP_AUTO_WRAP_POLICY:
            lowerCAmelCase_ : Tuple = self.dist_env.copy()
            lowerCAmelCase_ : List[str] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                lowerCAmelCase_ : List[str] = '''BertLayer'''
            elif policy == "SIZE_BASED_WRAP":
                lowerCAmelCase_ : Optional[int] = '''2000'''
            with mockenv_context(**A_):
                lowerCAmelCase_ : Tuple = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(A_)
            if policy == "NO_WRAP":
                self.assertIsNone(fsdp_plugin.auto_wrap_policy)
            else:
                self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)
        lowerCAmelCase_ : str = self.dist_env.copy()
        lowerCAmelCase_ : List[str] = '''TRANSFORMER_BASED_WRAP'''
        lowerCAmelCase_ : Optional[int] = '''T5Layer'''
        with mockenv_context(**A_):
            lowerCAmelCase_ : List[str] = FullyShardedDataParallelPlugin()
            with self.assertRaises(A_) as cm:
                fsdp_plugin.set_auto_wrap_policy(A_)
            self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception))
        lowerCAmelCase_ : Union[str, Any] = self.dist_env.copy()
        lowerCAmelCase_ : List[Any] = '''SIZE_BASED_WRAP'''
        lowerCAmelCase_ : List[Any] = '''0'''
        with mockenv_context(**A_):
            lowerCAmelCase_ : str = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(A_)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    # originally ``test_mixed_precision``
    def UpperCAmelCase__ ( self : Optional[Any]):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
        for mp_dtype in dtypes:
            lowerCAmelCase_ : Dict = self.dist_env.copy()
            lowerCAmelCase_ : List[str] = mp_dtype
            with mockenv_context(**A_):
                lowerCAmelCase_ : List[Any] = Accelerator()
                if mp_dtype == "fp16":
                    lowerCAmelCase_ : Optional[Any] = torch.floataa
                elif mp_dtype == "bf16":
                    lowerCAmelCase_ : Any = torch.bfloataa
                lowerCAmelCase_ : Optional[Any] = MixedPrecision(param_dtype=A_ , reduce_dtype=A_ , buffer_dtype=A_)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , A_)
                if mp_dtype == FPaa:
                    self.assertTrue(isinstance(accelerator.scaler , A_))
                elif mp_dtype == BFaa:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(A_)
    # originally ``test_cpu_offload``
    def UpperCAmelCase__ ( self : Any):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
        for flag in [True, False]:
            lowerCAmelCase_ : Any = self.dist_env.copy()
            lowerCAmelCase_ : Union[str, Any] = str(A_).lower()
            with mockenv_context(**A_):
                lowerCAmelCase_ : str = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=A_))
@require_fsdp
@require_multi_gpu
@slow
class __snake_case ( UpperCamelCase_ ):
    """Multi-GPU FSDP integration tests that launch helper scripts via
    ``accelerate launch`` (obfuscated ``FSDPIntegrationTest``).

    NOTE(review): the base ``UpperCamelCase_`` is undefined here, the three
    test methods share the name ``UpperCAmelCase__`` so only the last one
    survives, and the setUp bindings collapsed to locals instead of
    ``self.performance_lower_bound`` etc.  Kept byte-for-byte.
    """
    # originally ``setUp`` — thresholds, config names and script location
    def UpperCAmelCase__ ( self : List[Any]):
        super().setUp()
        lowerCAmelCase_ : Optional[Any] = 0.82
        lowerCAmelCase_ : Union[str, Any] = [
            '''fsdp_shard_grad_op_transformer_based_wrap''',
            '''fsdp_full_shard_transformer_based_wrap''',
        ]
        lowerCAmelCase_ : Union[str, Any] = {
            '''multi_gpu_fp16''': 3_2_0_0,
            '''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2_0_0_0,
            '''fsdp_full_shard_transformer_based_wrap_fp16''': 1_9_0_0,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
        }
        lowerCAmelCase_ : Union[str, Any] = 1_6_0
        lowerCAmelCase_ : List[Any] = 1_6_0
        lowerCAmelCase_ : Dict = inspect.getfile(accelerate.test_utils)
        lowerCAmelCase_ : str = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''external_deps'''])
    # originally ``test_performance``
    def UpperCAmelCase__ ( self : Optional[int]):
        lowerCAmelCase_ : Tuple = os.path.join(self.test_scripts_folder , '''test_performance.py''')
        lowerCAmelCase_ : Tuple = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
        for config in self.performance_configs:
            lowerCAmelCase_ : Union[str, Any] = cmd.copy()
            for i, strategy in enumerate(A_):
                if strategy.lower() in config:
                    cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""")
                    break
            if "fp32" in config:
                cmd_config.append('''--mixed_precision=no''')
            else:
                cmd_config.append('''--mixed_precision=fp16''')
            if "cpu_offload" in config:
                cmd_config.append('''--fsdp_offload_params=True''')
            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""")
                    break
            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''')
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append('''--fsdp_min_num_params=2000''')
            cmd_config.extend(
                [
                    self.test_file_path,
                    F"""--output_dir={self.tmpdir}""",
                    F"""--performance_lower_bound={self.performance_lower_bound}""",
                ])
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(A_ , env=os.environ.copy())
    # originally ``test_checkpointing``
    def UpperCAmelCase__ ( self : Tuple):
        lowerCAmelCase_ : Union[str, Any] = os.path.join(self.test_scripts_folder , '''test_checkpointing.py''')
        lowerCAmelCase_ : List[str] = [
            '''accelerate''',
            '''launch''',
            '''--num_processes=2''',
            '''--num_machines=1''',
            '''--machine_rank=0''',
            '''--use_fsdp''',
            '''--mixed_precision=fp16''',
            '''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
        ]
        for i, strategy in enumerate(A_):
            lowerCAmelCase_ : List[Any] = cmd.copy()
            cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""")
            if strategy != "FULL_SHARD":
                continue
            lowerCAmelCase_ : int = len(A_)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                lowerCAmelCase_ : List[str] = cmd_config[:state_dict_config_index]
                cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        F"""--output_dir={self.tmpdir}""",
                        '''--partial_train_epoch=1''',
                    ])
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(A_ , env=os.environ.copy())
                lowerCAmelCase_ : int = cmd_config[:-1]
                lowerCAmelCase_ : List[str] = os.path.join(self.tmpdir , '''epoch_0''')
                cmd_config.extend(
                    [
                        F"""--resume_from_checkpoint={resume_from_checkpoint}""",
                    ])
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(A_ , env=os.environ.copy())
    # originally ``test_peak_memory_usage``
    def UpperCAmelCase__ ( self : Dict):
        lowerCAmelCase_ : int = os.path.join(self.test_scripts_folder , '''test_peak_memory_usage.py''')
        lowerCAmelCase_ : Dict = [
            '''accelerate''',
            '''launch''',
            '''--num_processes=2''',
            '''--num_machines=1''',
            '''--machine_rank=0''',
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            lowerCAmelCase_ : Tuple = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(['''--mixed_precision=fp16'''])
            else:
                cmd_config.extend(['''--mixed_precision=no'''])
            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(['''--use_fsdp'''])
            for i, strategy in enumerate(A_):
                if strategy.lower() in spec:
                    cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""")
                    break
            if "cpu_offload" in spec:
                cmd_config.append('''--fsdp_offload_params=True''')
            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in spec:
                    cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""")
                    break
            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''')
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append('''--fsdp_min_num_params=2000''')
            cmd_config.extend(
                [
                    self.test_file_path,
                    F"""--output_dir={self.tmpdir}""",
                    F"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
                    F"""--n_train={self.n_train}""",
                    F"""--n_val={self.n_val}""",
                ])
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(A_ , env=os.environ.copy())
| 103 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
# NOTE(review): the logger and the pretrained-config archive map were both
# renamed to ``__A``, so the dict below immediately clobbers the logger.
__A : Dict = logging.get_logger(__name__)
# Map of Longformer checkpoint names to their hosted config.json URLs.
__A : str = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}
class A_ (a_ ):
    """Longformer model configuration (obfuscated ``LongformerConfig``).

    NOTE(review): the base ``a_`` is undefined here, ``__init__`` declares
    every parameter as ``_A`` (duplicate argument names are a SyntaxError),
    and the ``self.<attr> = <attr>`` assignments collapsed to a throwaway
    local ``UpperCAmelCase`` reading the original (now-undefined) parameter
    names.  Kept byte-for-byte.
    """
    # originally the ``model_type`` identifier
    UpperCAmelCase__ = '''longformer'''
    def __init__( self , _A = 5_1_2 , _A = 2 , _A = 1 , _A = 0 , _A = 2 , _A = 3_0_5_2_2 , _A = 7_6_8 , _A = 1_2 , _A = 1_2 , _A = 3_0_7_2 , _A = "gelu" , _A = 0.1 , _A = 0.1 , _A = 5_1_2 , _A = 2 , _A = 0.02 , _A = 1E-12 , _A = False , **_A , ):
        '''Initialize the configuration; the attribute list below shows the
        original parameter order (attention_window, sep/bos/eos token ids,
        vocab and hidden sizes, layer/head counts, dropouts, ...).'''
        super().__init__(pad_token_id=_A , **_A )
        UpperCAmelCase = attention_window
        UpperCAmelCase = sep_token_id
        UpperCAmelCase = bos_token_id
        UpperCAmelCase = eos_token_id
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = hidden_act
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = type_vocab_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = layer_norm_eps
        UpperCAmelCase = onnx_export
class A_ (a_ ):
    """ONNX export configuration for Longformer (obfuscated
    ``LongformerOnnxConfig``).

    NOTE(review): ``__init__`` repeats the parameter name ``_A`` (a
    SyntaxError) and every method/property is named ``_lowercase``, so later
    definitions clobber earlier ones; the bodies correspond to ``inputs``,
    ``outputs``, ``atol_for_validation``, ``default_onnx_opset`` and
    ``generate_dummy_inputs`` upstream.  Kept byte-for-byte.
    """
    def __init__( self , _A , _A = "default" , _A = None ):
        '''Construct the ONNX config and flag export mode on the model config.'''
        super().__init__(_A , _A , _A )
        UpperCAmelCase = True
    # originally ``inputs``
    @property
    def _lowercase ( self ):
        '''Dynamic axes for the model inputs (adds ``global_attention_mask``).'''
        if self.task == "multiple-choice":
            UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''global_attention_mask''', dynamic_axis),
            ] )
    # originally ``outputs``
    @property
    def _lowercase ( self ):
        '''Output axes; the pooler output keeps only the batch axis for the
        default task.'''
        UpperCAmelCase = super().outputs
        if self.task == "default":
            UpperCAmelCase = {0: '''batch'''}
        return outputs
    # originally ``atol_for_validation``
    @property
    def _lowercase ( self ):
        '''Absolute tolerance used when validating the exported model.'''
        return 1E-4
    # originally ``default_onnx_opset`` — Longformer needs at least opset 14
    @property
    def _lowercase ( self ):
        '''simple docstring'''
        return max(super().default_onnx_opset , 1_4 )
    # originally ``generate_dummy_inputs``
    def _lowercase ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
        '''Build dummy inputs and attach a ``global_attention_mask`` that marks
        every second token as global.'''
        UpperCAmelCase = super().generate_dummy_inputs(
            preprocessor=_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        UpperCAmelCase = torch.zeros_like(inputs['''input_ids'''] )
        # make every second token global
        UpperCAmelCase = 1
        return inputs
| 273 | 0 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
# NOTE(review): the GLIDER and BLINKER grids were both renamed to
# ``lowerCAmelCase__``, so the blinker assignment below overwrites the glider.
lowerCAmelCase__ = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
lowerCAmelCase__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def _A ( A__ ):
    """Compute the next Conway's Game of Life generation for a 2-D grid of
    0/1 ints, returning a new grid of the same shape.

    NOTE(review): the original body read the undefined names
    ``next_generation_row`` / ``neighbour_count`` / ``alive`` (their
    assignments had been collapsed to a throwaway local); the working
    variables are restored, and the eight explicit neighbour checks are
    folded into an offset loop with the same bounds.
    """
    cells = A__
    next_generation = []
    for i in range(len(cells) ):
        next_generation_row = []
        for j in range(len(cells[i]) ):
            # Count the live neighbours of cell (i, j); the bounds mirror the
            # original eight-way checks (row length taken from row i).
            neighbour_count = 0
            for di in (-1, 0, 1):
                for dj in (-1, 0, 1):
                    if di == 0 and dj == 0:
                        continue
                    ni, nj = i + di, j + dj
                    if 0 <= ni < len(cells) and 0 <= nj < len(cells[i]):
                        neighbour_count += cells[ni][nj]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
def _A ( cells , frames ):
    """Render ``frames`` successive generations of ``cells`` as PIL RGB
    images (white = dead, black = alive).

    NOTE(review): the original declared both parameters with the same name
    (a SyntaxError) and read the undefined locals ``img`` / ``pixels`` /
    ``colour``; they are restored here.  ``new_generation`` is not defined
    under that name in this mechanically renamed module (the function above
    became ``_A`` as well) — restore the original name upstream.
    """
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new('''RGB''' , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    # NOTE(review): ``generate_images``/``GLIDER``/``images`` were renamed
    # (``_A`` / ``lowerCAmelCase__``) by the obfuscation, so the names below
    # no longer resolve; restore the upstream names.
    lowerCAmelCase__ = generate_images(GLIDER, 16)
    images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 104 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A_ (a_ ):
    """Karras et al. (2022) stochastic sampler for unconditional image
    generation (the ``KarrasVePipeline`` pattern: noise-augmented Euler steps
    with a 2nd-order correction).

    NOTE(review): the two ``42`` placeholders below replaced what were most
    likely the ``unet``/``scheduler`` class-level type annotations — confirm
    against upstream.  The original ``__init__``/``__call__`` declared
    duplicate parameter names (a SyntaxError) and every intermediate was
    bound to one obfuscated local while the real names (``model``,
    ``sample_hat``, ...) were read undefined; both are repaired here.
    """
    UpperCAmelCase__ = 42
    UpperCAmelCase__ = 42

    def __init__( self , unet , scheduler ):
        '''Register the denoising UNet and the Karras-VE scheduler.'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 5_0 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ):
        '''Sample ``batch_size`` images using ``num_inference_steps`` steps;
        returns an ``ImagePipelineOutput`` (or a tuple when
        ``return_dict=False``).'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape , generator=generator , device=self.device ) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat , sigma_hat = self.scheduler.add_noise_to_input(sample , sigma , generator=generator )
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output , sigma_hat , sigma_prev , sample_hat )
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                step_output = self.scheduler.step_correct(
                    model_output , sigma_hat , sigma_prev , sample_hat , step_output.prev_sample , step_output['''derivative'''] , )
            sample = step_output.prev_sample
        # map from [-1, 1] to [0, 1] and move channels last for numpy/PIL
        sample = (sample / 2 + 0.5).clamp(0 , 1 )
        image = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 273 | 0 |
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( possible_board : list[int] , diagonal_right_collisions : list[int] , diagonal_left_collisions : list[int] , boards : list[list[str]] , n : int , ) ->None:
    '''Depth-first search placing one queen per row; complete boards are
    appended to ``boards`` as lists of strings.

    NOTE(review): the original declared all five parameters as ``_lowercase``
    (duplicate argument names are a SyntaxError) and recursed through the
    undefined name ``depth_first_search``; distinct parameter names and
    self-recursion are restored here.
    '''
    # The next row to fill is the number of queens already placed.
    row = len(possible_board )
    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
        return
    # We iterate each column in the row to find all possible results in each row
    for col in range(n ):
        # First we check that the current board (possible_board) has no other queen in
        # this column (a vertical collision).  Then we apply the two diagonal formulas:
        #
        # 45º:  y - x = b   or   row - col = b
        # 135º: y + x = b   or   row + col = b.
        #
        # If either result already exists in its collision list, this square is
        # attacked and we continue with the next column.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # Otherwise recurse with the queen placed and the collision sets extended.
        _SCREAMING_SNAKE_CASE(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->None:
'''simple docstring'''
a : list[list[str]] = []
depth_first_search([] , [] , [] , _lowercase , _lowercase )
# Print all the boards
for board in boards:
for column in board:
print(_lowercase )
print("" )
print(len(_lowercase ) , "solutions were found." )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 105 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
# Module-level RNG shared by ``floats_list`` when the caller supplies none.
global_rng = random.Random()
# Backward-compatible alias for the old module-level name (the previous ``str``
# annotation was wrong — this is a ``random.Random`` instance).
__A = global_rng


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a ``shape[0] x shape[1]`` nested list of random floats in ``[0, scale)``.

    Args:
        shape: pair ``(rows, cols)`` giving the dimensions of the nested list.
        scale: multiplier applied to every sampled value.
        rng: optional ``random.Random``; defaults to the module-level ``global_rng``.
        name: unused; kept for signature compatibility with similar helpers.

    Returns:
        ``list[list[float]]`` of the requested shape.
    """
    if rng is None:
        rng = global_rng

    values = []
    for _ in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


# The rest of the file calls this helper as ``floats_list``; keep the old
# obfuscated name pointing at the same function for backward compatibility.
__SCREAMING_SNAKE_CASE = floats_list
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    """Helper that builds SpeechT5 feature-extractor configs and random inputs.

    Instantiated by the test class below as ``SpeechTaFeatureExtractionTester(self)``;
    it only stores configuration and fabricates inputs, it runs no tests itself.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive input lengths so the batch covers
        # [min_seq_length, max_seq_length] in batch_size - 1 increments.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        """Return the kwargs dict used to construct a ``SpeechTaFeatureExtractor``."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Build a batch of random waveform inputs (flat float lists).

        Args:
            equal_length: if True all inputs share ``max_seq_length``;
                otherwise lengths increase from ``min_seq_length``.
            numpify: convert each input to ``np.ndarray``.
        """

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        """Build a batch of random mel-spectrogram targets (2-D: frames x mel bins)."""
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Tests for ``SpeechTaFeatureExtractor`` (waveform inputs and mel targets)."""

    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        # Normalized features should have ~0 mean and ~1 variance per dimension.
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        # float64 inputs must be downcast to float32 by pad(), for both backends.
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size: targets are mel spectrograms, (batch, frames, mel bins).
        input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 273 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__UpperCamelCase : Any = logging.getLogger(__name__)
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int)
__UpperCamelCase : Any = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
__UpperCamelCase : List[Any] = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
__UpperCamelCase : Dict = Counter()
for tk_ids in data:
counter.update(tk_ids)
__UpperCamelCase : Optional[Any] = [0] * args.vocab_size
for k, v in counter.items():
__UpperCamelCase : Dict = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 106 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public names it exports.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

# Each optional-dependency group adds its submodule only when the backend is
# installed; otherwise the names simply stay out of the lazy module.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below resolves names on first access instead.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module in place of this one so attribute access
    # triggers the deferred submodule imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 | 0 |
from __future__ import annotations
def __magic_name__ ( A : list[int | float], A : int, A : int ):
'''simple docstring'''
if len(A ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(A )
or left < -len(A )
or right >= len(A )
or right < -len(A )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
a = (left + right) >> 1 # the middle
a = find_max(A, A, A ) # find max in range[left, mid]
a = find_max(A, mid + 1, A ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 107 |
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first *length* hexagonal numbers.

    The n-th hexagonal number is ``h(n) = n * (2n - 1)``.

    Args:
        length: how many numbers to generate; must be a positive integer.

    Raises:
        ValueError: if *length* is not a positive integer.
    """
    # Check the type first so a non-numeric argument raises ValueError
    # instead of a TypeError from the "<= 0" comparison.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


# Backward-compatible alias for the previous (obfuscated) name.
__SCREAMING_SNAKE_CASE = hexagonal_numbers


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
| 273 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    """Binary tree node; ``data`` is the number of coins stored at the node."""

    data: int
    left: "TreeNode | None" = None
    right: "TreeNode | None" = None


# Backward-compatible alias for the previous (obfuscated) class name.
SCREAMING_SNAKE_CASE__ = TreeNode

# (moves, excess): moves performed in the subtree, and the subtree's coin
# surplus relative to "one coin per node" (shifted by +1 in the recursion).
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
lowerCAmelCase__ = CoinsDistribResult  # backward-compatible alias


def distribute_coins(root: "TreeNode | None") -> int:
    """Return the minimum number of single-coin moves along edges so that
    every node of the tree ends up with exactly one coin.

    Args:
        root: root of the tree (``None`` means an empty tree, 0 moves).

    Raises:
        ValueError: if the total number of coins differs from the number of
            nodes (then no valid distribution exists).
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: "TreeNode | None") -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: "TreeNode | None") -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: "TreeNode | None") -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        # Coins that must cross the edge to/from each child (sign gives direction);
        # each crossing coin is one move.
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


# Backward-compatible alias for the previous (obfuscated) function name.
a__ = distribute_coins


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 108 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class A_(unittest.TestCase):
    """Single-node SageMaker training smoke test, parameterized per framework.

    ``self.env``, ``self.framework`` etc. are injected by the ``sm_env``
    fixture and ``parameterized_class`` entries above.
    """

    def setUp(self):
        # Copy the example training script next to the test sources for the
        # PyTorch run (the TF run ships its own script).
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        """Build the HuggingFace SageMaker estimator for a single-node job."""
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the training-job metrics of *job_name* to a CSV in the test path."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 273 | 0 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
A: Any = get_logger(__name__)
A: Tuple = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class SCREAMING_SNAKE_CASE__ :
@add_start_docstrings(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class SCREAMING_SNAKE_CASE__ :
@add_start_docstrings(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
@add_start_docstrings(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
'''simple docstring'''
for processor in self:
UpperCAmelCase : List[Any] = inspect.signature(processor.__call__ ).parameters
if len(_SCREAMING_SNAKE_CASE ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F"Make sure that all the required parameters: {list(function_args.keys() )} for "
F"{processor.__class__} are passed to the logits processor." )
UpperCAmelCase : Dict = processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : Any = processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return scores
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def __init__( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not (temperature > 0):
raise ValueError(F"`temperature` has to be a strictly positive float, but is {temperature}" )
UpperCAmelCase : str = temperature
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
'''simple docstring'''
UpperCAmelCase : List[Any] = scores / self.temperature
return scores
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -float("""Inf""" ) , _SCREAMING_SNAKE_CASE = 1 ) -> List[str]:
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F"`top_p` has to be a float > 0 and < 1, but is {top_p}" )
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or (min_tokens_to_keep < 1):
raise ValueError(F"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}" )
UpperCAmelCase : int = top_p
UpperCAmelCase : Optional[int] = filter_value
UpperCAmelCase : Optional[int] = min_tokens_to_keep
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Tuple = lax.top_k(_SCREAMING_SNAKE_CASE , scores.shape[-1] )
UpperCAmelCase : Tuple = jnp.full_like(_SCREAMING_SNAKE_CASE , self.filter_value )
UpperCAmelCase : Tuple = jax.nn.softmax(_SCREAMING_SNAKE_CASE , axis=-1 ).cumsum(axis=-1 )
UpperCAmelCase : Optional[Any] = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
UpperCAmelCase : List[str] = jnp.roll(_SCREAMING_SNAKE_CASE , 1 )
score_mask |= score_mask.at[:, 0].set(_SCREAMING_SNAKE_CASE )
# min tokens to keep
UpperCAmelCase : Tuple = score_mask.at[:, : self.min_tokens_to_keep].set(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = jnp.where(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = jax.lax.sort_key_val(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[-1]
return next_scores
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -float("""Inf""" ) , _SCREAMING_SNAKE_CASE = 1 ) -> Any:
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or top_k <= 0:
raise ValueError(F"`top_k` has to be a strictly positive integer, but is {top_k}" )
UpperCAmelCase : Optional[int] = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = filter_value
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : List[Any] = scores.shape
UpperCAmelCase : Optional[int] = jnp.full(batch_size * vocab_size , self.filter_value )
UpperCAmelCase : int = min(self.top_k , scores.shape[-1] ) # Safety check
UpperCAmelCase , UpperCAmelCase : List[str] = lax.top_k(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = jnp.broadcast_to((jnp.arange(_SCREAMING_SNAKE_CASE ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
UpperCAmelCase : Union[str, Any] = topk_scores.flatten()
UpperCAmelCase : Optional[int] = topk_indices.flatten() + shift
UpperCAmelCase : Optional[Any] = next_scores_flat.at[topk_indices_flat].set(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = next_scores_flat.reshape(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return next_scores
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
    """Force `bos_token_id` to be sampled as the first generated token.

    NOTE(review): parameter names restored from the body (duplicate obfuscated
    parameters were a SyntaxError); corresponds to FlaxForcedBOSTokenLogitsProcessor.
    """

    def __init__(self, bos_token_id):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        """When `cur_len == 1`, zero out the BOS score and -inf everything else."""
        new_scores = jnp.full(scores.shape, -float("inf"))
        # 1 exactly when cur_len == 1, else 0
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
    """Force `eos_token_id` as the last token once `max_length` is reached.

    NOTE(review): parameter names restored from the body (duplicate obfuscated
    parameters were a SyntaxError); corresponds to FlaxForcedEOSTokenLogitsProcessor.
    """

    def __init__(self, max_length, eos_token_id):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        """When `cur_len == max_length - 1`, zero out EOS and -inf everything else."""
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
    """Suppress EOS until at least `min_length` tokens have been generated.

    NOTE(review): parameter names restored from the body (duplicate obfuscated
    parameters were a SyntaxError); corresponds to FlaxMinLengthLogitsProcessor.
    """

    def __init__(self, min_length, eos_token_id):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        """Set the EOS score to -inf while `cur_len < min_length`."""
        # 1 while cur_len < min_length, 0 afterwards
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
    """Suppress a fixed set of tokens while generation is at `begin_index`.

    NOTE(review): parameter names restored from the body (duplicate obfuscated
    parameters were a SyntaxError); corresponds to FlaxSuppressTokensAtBeginLogitsProcessor.
    """

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len):
        """Set suppressed-token scores to -inf when `cur_len == begin_index`."""
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
    """Unconditionally suppress a fixed set of tokens at every step.

    NOTE(review): parameter name restored from usage (corresponds to
    FlaxSuppressTokensLogitsProcessor).
    """

    def __init__(self, suppress_tokens):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        """Set the scores of all suppressed tokens to -inf."""
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
    """Force specific tokens at specific generation indices.

    NOTE(review): parameter names restored from the body (duplicate obfuscated
    parameters were a SyntaxError; `jnp.intaa` was mangled `jnp.int32`);
    corresponds to FlaxForceTokensLogitsProcessor.
    """

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        """If a token is registered for `cur_len`, give it score 0 and -inf to all others."""

        def _force_token(generation_idx):
            # Build a scores matrix that is -inf everywhere except 0 at the forced token.
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # past the end of the map: nothing to force
            lambda: scores,
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # only valid (non-negative) tokens are forced
                lambda: _force_token(cur_len),
                lambda: scores,
            ),
        )
        return scores
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
    """Whisper timestamp rules for generation: timestamps must come in pairs, an
    initial timestamp window can be enforced, and when the total timestamp
    probability dominates all text tokens only timestamps may be sampled.

    NOTE(review): identifiers restored from the surviving local names in the body
    (duplicate obfuscated parameters were a SyntaxError); corresponds to
    FlaxWhisperTimeStampLogitsProcessor.
    """

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        # first token id that represents a timestamp
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        """Apply the timestamp constraints to `scores` and return them."""
        # the <|notimestamps|> token is never sampled
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            # timestamps have to appear in pairs, except directly before eos_token
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, False,
            )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin, True, penultimate_was_timestamp,
            )
            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    # pair already complete: no more timestamps
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    # second slot of the pair: no text tokens
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        # restrict the first timestamp to at most `max_initial_timestamp_index`
        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, False,
        )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp, scores.at[:, last_allowed + 1 :].set(-float("inf")), scores,
        )
        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
        return scores
| 109 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and the BigBird pretrained-config archive map.
# NOTE(review): obfuscation collapsed two distinct globals onto the single name
# `__A`; the dict assignment below rebinds it, losing the logger binding —
# confirm the intended original names before relying on either.
__A : int = logging.get_logger(__name__)
__A : Tuple = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ (a_ ):
    """Configuration class for BigBird models.

    NOTE(review): the obfuscated `__init__` repeated `_A` for every parameter
    (a SyntaxError); parameter names and defaults restored from the body's
    attribute assignments (corresponds to BigBirdConfig).
    """

    # model identifier used by the auto classes
    model_type = '''big_bird'''

    def __init__(
        self,
        vocab_size=5_0_3_5_8,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4_0_9_6,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=6_6,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=6_4,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class A_ (a_ ):
    """ONNX export configuration for BigBird.

    NOTE(review): the obfuscated body assigned the axis mapping to a throwaway
    name and then returned the never-bound `dynamic_axis` (a NameError); the
    binding is restored here. The property name `_lowercase` is kept for
    compatibility, though it presumably corresponds to `OnnxConfig.inputs` —
    confirm against callers.
    """

    @property
    def _lowercase(self):
        """Return the ONNX input-name → dynamic-axis mapping for the current task."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 273 | 0 |
from __future__ import annotations
import math
def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] ) -> int:
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if not scores:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , )
)
def __snake_case ( ) -> None:
A_ : Optional[int] = [90, 23, 6, 33, 21, 65, 123, 34423]
A_ : Union[str, Any] = math.log(len(UpperCamelCase__ ) , 2 )
print(f"Optimal value : {minimax(0 , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 300 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    """Builds tiny ViT configs/inputs and runs shape checks on TF ViT models.

    NOTE(review): the obfuscated `__init__` repeated `_A` for every parameter
    (a SyntaxError) and all methods were named `_lowercase` while the test class
    below calls them by their real names — identifiers restored accordingly.
    The class is renamed to `TFViTModelTester` because that is the name its only
    caller (`setUp` in the test class) uses.
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        image_size=3_0,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=3_2,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=1_0,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny ViT."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build the tiny ViTConfig used by all checks."""
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's output shapes, including interpolated positions."""
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check the classification head's logits shape for several input variants."""
        num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by the common test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class A_ (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common TF model tests specialized for ViT.

    NOTE(review): the obfuscated class listed the same erased base `a_` twice and
    named every test method `_lowercase`, so each definition shadowed the previous
    one and only a single test survived. Bases restored from the imports at the
    top of this file; method names restored to unittest's `test_*` convention.
    """

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained('''google/vit-base-patch16-224''')
        self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO sample image used by the ViT integration test below.

    NOTE(review): renamed from the obfuscated `__SCREAMING_SNAKE_CASE` because the
    integration test calls it as `prepare_img`; an alias preserves the old name.
    """
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')


# Backward-compatible alias for the obfuscated name this function carried.
__SCREAMING_SNAKE_CASE = prepare_img
@require_tf
@require_vision
class A_ (unittest.TestCase ):
    """Slow integration test comparing TF ViT logits against reference values.

    NOTE(review): method names were erased to `_lowercase` by obfuscation;
    `default_image_processor` is restored from its surviving call site and the
    test method renamed to unittest's `test_*` convention so it is collected.
    """

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''tf''')
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.27_44, 0.82_15, -0.08_36])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1E-4)
| 273 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
# Module-level logger (used by the Bark processor's warning messages below).
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( a_ ):
    """Bark speech processor: wraps a text tokenizer plus optional speaker-embedding
    voice presets.

    NOTE(review): the obfuscated `__init__` repeated `_snake_case` as a parameter
    name (a SyntaxError) and every method/attribute name was erased; identifiers
    restored from the surviving internal call sites (`_load_voice_preset`,
    `_validate_voice_preset_dict`, `preset_shape`) — corresponds to BarkProcessor.
    """

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]
    # expected ndarray rank for each voice-preset component
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        """Instantiate the processor, optionally preloading the speaker-embeddings dict."""
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop('''subfolder''', None),
                cache_dir=kwargs.pop('''cache_dir''', None),
                force_download=kwargs.pop('''force_download''', False),
                proxies=kwargs.pop('''proxies''', None),
                resume_download=kwargs.pop('''resume_download''', False),
                local_files_only=kwargs.pop('''local_files_only''', False),
                use_auth_token=kwargs.pop('''use_auth_token''', None),
                revision=kwargs.pop('''revision''', None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""")
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(self, save_directory, speaker_embeddings_dict_path="speaker_embeddings_path.json", speaker_embeddings_directory="speaker_embeddings", push_to_hub=False, **kwargs):
        """Save the tokenizer and, if present, the speaker embeddings as .npy files plus an index json."""
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, '''v2'''), exist_ok=True)
            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['''repo_or_path'''], speaker_embeddings_directory, f"""{prompt_key}_{key}"""),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"""{prompt_key}_{key}.npy""")
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory, speaker_embeddings_dict_path), '''w''') as fp:
                json.dump(embeddings_dict, fp)
        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset=None, **kwargs):
        """Resolve and load the three .npy prompt arrays of a named voice preset."""
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""")
            path = get_file_from_repo(
                self.speaker_embeddings.get('''repo_or_path''', '''/'''),
                voice_preset_paths[key],
                subfolder=kwargs.pop('''subfolder''', None),
                cache_dir=kwargs.pop('''cache_dir''', None),
                force_download=kwargs.pop('''force_download''', False),
                proxies=kwargs.pop('''proxies''', None),
                resume_download=kwargs.pop('''resume_download''', False),
                local_files_only=kwargs.pop('''local_files_only''', False),
                use_auth_token=kwargs.pop('''use_auth_token''', None),
                revision=kwargs.pop('''revision''', None),
            )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.""")
            voice_preset_dict[key] = np.load(path)
        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        """Check that a voice-preset dict has all three prompts with the expected ranks."""
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"""Voice preset unrecognized, missing {key} as a key.""")
            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.""")
            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.""")

    def __call__(self, text=None, voice_preset=None, return_tensors="pt", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs):
        """Tokenize `text` and attach the (loaded and validated) voice preset as `history_prompt`."""
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith('''.npz'''):
                    voice_preset = voice_preset + '''.npz'''
                voice_preset = np.load(voice_preset)
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)
        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding='''max_length''',
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )
        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset
        return encoded_text
| 16 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class A_ (unittest.TestCase ):
    """Slow integration tests comparing XLM-RoBERTa hidden states to reference values.

    NOTE(review): both test methods were obfuscated to the same name `_lowercase`,
    so the second definition shadowed the first and only one test was registered;
    distinct `test_*` names restored.
    """

    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-base''')
        input_ids = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 1_2, 7_6_8))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['''last_hidden_state'''].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-large''')
        input_ids = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 1_2, 1_0_2_4))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['''last_hidden_state'''].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 273 | 0 |
from __future__ import annotations
from typing import Any
class Matrix:
    """A simple row x column matrix with arithmetic and a Sherman-Morrison
    rank-1 inverse update.

    NOTE(review): the obfuscated `__init__` repeated one parameter name three
    times (a SyntaxError) and every local/attribute binding was erased to
    `lowercase`; identifiers restored from the surviving references. The class
    is named `Matrix` because the demo driver below constructs it by that name;
    the obfuscated name `A_` is kept as an alias.
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a row x column matrix filled with `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
        # Make string identifier wide enough for the longest element
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f'''%{max_element_length}s'''

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc) -> bool:
        """Return True iff `loc` is a valid (row, column) pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add element-wise
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            raise TypeError(f'''Unsupported type given for another ({type(another)})''')

    def transpose(self):
        """Return a new matrix that is the transpose of this one."""
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Return (self^-1 + u v^T)^-1 given `self` already holds an inverse,
        using the Sherman-Morrison formula; None if the update is singular."""
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Backward-compatible alias for the obfuscated class name.
A_ = Matrix
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demo the Sherman-Morrison update on an identity matrix.

        NOTE(review): the obfuscated driver left `ainv`, `u` and `v` unbound and
        called the undefined `testa()`; bindings restored from the surviving
        f-string references.
        """
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f'''a^(-1) is {ainv}''')
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'''u is {u}''')
        print(f'''v is {v}''')
        print(f'''uv^T is {u * v.transpose()}''')
        # Sherman Morrison
        print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}''')

    def test2() -> None:
        """Run the module's doctests."""
        import doctest

        doctest.testmod()

    test1()
| 195 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
# Module-level logger for this training script.
__A : Optional[int] = logging.getLogger(__name__)
@dataclass
class A_ :
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune.

    NOTE(review): obfuscation named every field `UpperCAmelCase__`, so only the
    last assignment survived as a dataclass field and the erased default `a_`
    was undefined; field names and defaults restored from the help texts
    (corresponds to the multiple-choice example's ModelArguments).
    """

    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
@dataclass
class A_ :
    """Command-line arguments describing the input data and preprocessing options.

    NOTE(review): every attribute is bound to the same un-annotated name
    ``UpperCAmelCase__`` (presumably train_file, validation_file, overwrite_cache,
    preprocessing_num_workers, max_seq_length, pad_to_max_length, max_train_samples,
    max_eval_samples before an automated rename), and ``a_`` stands in for the original
    defaults. The validator below also reads ``self.train_file`` / ``self.validation_file``
    / ``extension``, which match the apparent pre-rename field/local names — confirm
    upstream.
    """

    # train_file: path to the training data file.
    UpperCAmelCase__ = field(default=a_ , metadata={'''help''': '''The input training data file (a text file).'''} )
    # validation_file: optional evaluation data file.
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    # overwrite_cache: ignore cached preprocessed datasets.
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    # preprocessing_num_workers: processes used for dataset preprocessing.
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    # max_seq_length: truncation/padding length after tokenization.
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. If passed, sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    # pad_to_max_length: static padding (TPU-friendly) vs dynamic per-batch padding.
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''Whether to pad all samples to the maximum sentence length. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
                '''efficient on GPU but very bad for TPU.'''
            )
        } , )
    # max_train_samples: truncate the training set for debugging / quick runs.
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    # max_eval_samples: truncate the evaluation set for debugging / quick runs.
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )

    def _lowercase ( self ):
        """Validate that any provided data files are csv or json (checks the suffix only)."""
        if self.train_file is not None:
            UpperCAmelCase = self.train_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            UpperCAmelCase = self.validation_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class A_ :
    """Data collator that dynamically pads batches of multiple-choice examples.

    Flattens the (batch_size, num_choices) example structure, pads the flat list with
    the tokenizer, reshapes back to (batch, choices, seq), and re-attaches the labels.

    NOTE(review): the four class attributes below all share the name ``UpperCAmelCase__``
    (presumably tokenizer, padding, max_length, pad_to_multiple_of before an automated
    rename; ``42`` looks like a placeholder for the original tokenizer annotation),
    while ``__call__`` reads ``self.tokenizer`` / ``self.padding`` / ``self.max_length``
    / ``self.pad_to_multiple_of`` and the locals ``features`` / ``batch`` — confirm
    upstream.
    """

    UpperCAmelCase__ = 42
    UpperCAmelCase__ = True
    UpperCAmelCase__ = None
    UpperCAmelCase__ = None

    def __call__( self , _A ):
        """Collate a list of feature dicts into a padded (batch, choices, seq) tensor batch."""
        # Accept either "label" or "labels" as the target key and pull it out first.
        UpperCAmelCase = '''label''' if '''label''' in features[0].keys() else '''labels'''
        UpperCAmelCase = [feature.pop(_A ) for feature in features]
        UpperCAmelCase = len(_A )
        UpperCAmelCase = len(features[0]['''input_ids'''] )
        # Flatten (batch, choices) -> list of single-choice feature dicts.
        UpperCAmelCase = [
            [{k: v[i] for k, v in feature.items()} for i in range(_A )] for feature in features
        ]
        UpperCAmelCase = list(chain(*_A ) )
        UpperCAmelCase = self.tokenizer.pad(
            _A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        # Un-flatten back to (batch, choices, seq).
        UpperCAmelCase = {k: v.view(_A , _A , -1 ) for k, v in batch.items()}
        # Add back labels. NOTE(review): `torch.intaa` is presumably `torch.int64`
        # (mangled by the same automated rename) — confirm upstream.
        UpperCAmelCase = torch.tensor(_A , dtype=torch.intaa )
        return batch
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
    """Fine-tune and/or evaluate a multiple-choice model on SWAG (or user csv/json files).

    Parses model/data/training arguments, sets up logging and seeding, loads and
    tokenizes the dataset, then trains/evaluates with the HF `Trainer` and optionally
    pushes the model card / checkpoint to the Hub.

    NOTE(review): every local in this function was renamed to ``UpperCAmelCase`` by an
    automated rewrite, so later reads (``parser``, ``model_args``, ``data_args``,
    ``training_args``, ``raw_datasets``, ``tokenizer``, ``model``, ``max_seq_length``,
    ``train_dataset``, ``eval_dataset``, ``trainer``, ``metrics``, ``kwargs``, ...)
    refer to names that are never bound as written — the comments describe the evident
    pre-rename intent; confirm against the original run_swag.py.
    """
    # Parse command line (or a single .json config file) into the three dataclasses.
    UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_swag''' , UpperCamelCase__ , UpperCamelCase__ )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    UpperCAmelCase = training_args.get_process_log_level()
    logger.setLevel(UpperCamelCase__ )
    datasets.utils.logging.set_verbosity(UpperCamelCase__ )
    transformers.utils.logging.set_verbosity(UpperCamelCase__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )

    # Detecting last checkpoint.
    UpperCAmelCase = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        UpperCAmelCase = {}
        if data_args.train_file is not None:
            UpperCAmelCase = data_args.train_file
        if data_args.validation_file is not None:
            UpperCAmelCase = data_args.validation_file
        UpperCAmelCase = data_args.train_file.split('''.''' )[-1]
        UpperCAmelCase = load_dataset(
            UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        UpperCAmelCase = load_dataset(
            '''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    UpperCAmelCase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    UpperCAmelCase = [F"""ending{i}""" for i in range(4 )]
    UpperCAmelCase = '''sent1'''
    UpperCAmelCase = '''sent2'''
    if data_args.max_seq_length is None:
        UpperCAmelCase = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                '''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
                ''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
                ''' override this default with `--block_size xxx`.''' )
            UpperCAmelCase = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )

    # Preprocessing the datasets.
    def preprocess_function(UpperCamelCase__ ):
        # Repeat each context once per ending and pair it with "question header + ending".
        UpperCAmelCase = [[context] * 4 for context in examples[context_name]]
        UpperCAmelCase = examples[question_header_name]
        UpperCAmelCase = [
            [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(UpperCamelCase__ )
        ]

        # Flatten out
        UpperCAmelCase = list(chain(*UpperCamelCase__ ) )
        UpperCAmelCase = list(chain(*UpperCamelCase__ ) )

        # Tokenize
        UpperCAmelCase = tokenizer(
            UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('''--do_train requires a train dataset''' )
        UpperCAmelCase = raw_datasets['''train''']
        if data_args.max_train_samples is not None:
            UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
            UpperCAmelCase = train_dataset.select(range(UpperCamelCase__ ) )
        with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
            UpperCAmelCase = train_dataset.map(
                UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('''--do_eval requires a validation dataset''' )
        UpperCAmelCase = raw_datasets['''validation''']
        if data_args.max_eval_samples is not None:
            UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
            UpperCAmelCase = eval_dataset.select(range(UpperCamelCase__ ) )
        with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
            UpperCAmelCase = eval_dataset.map(
                UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )

    # Data collator: static padding uses the default collator, dynamic padding the
    # multiple-choice collator defined above.
    UpperCAmelCase = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
    )

    # Metric
    def compute_metrics(UpperCamelCase__ ):
        # Simple accuracy over the argmax of the per-choice logits.
        UpperCAmelCase , UpperCAmelCase = eval_predictions
        UpperCAmelCase = np.argmax(UpperCamelCase__ , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}

    # Initialize our Trainer
    UpperCAmelCase = Trainer(
        model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )

    # Training
    if training_args.do_train:
        UpperCAmelCase = None
        if training_args.resume_from_checkpoint is not None:
            UpperCAmelCase = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            UpperCAmelCase = last_checkpoint
        UpperCAmelCase = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
        trainer.save_model() # Saves the tokenizer too for easy upload
        UpperCAmelCase = train_result.metrics
        UpperCAmelCase = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
        )
        UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )

        trainer.log_metrics('''train''' , UpperCamelCase__ )
        trainer.save_metrics('''train''' , UpperCamelCase__ )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )

        UpperCAmelCase = trainer.evaluate()
        UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
        UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )

        trainer.log_metrics('''eval''' , UpperCamelCase__ )
        trainer.save_metrics('''eval''' , UpperCamelCase__ )

    # Model-card metadata for the Hub.
    UpperCAmelCase = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''multiple-choice''',
        '''dataset_tags''': '''swag''',
        '''dataset_args''': '''regular''',
        '''dataset''': '''SWAG''',
        '''language''': '''en''',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**UpperCamelCase__ )
    else:
        trainer.create_model_card(**UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int:
    """xla_spawn (TPU) entry point; the index argument is required by spawning but unused.

    NOTE(review): this definition re-binds the module name ``__SCREAMING_SNAKE_CASE``
    (already used by the training entry point above) and calls ``main()``, which is not
    defined in this module — presumably the two functions were ``main``/``_mp_fn``
    before an automated rename; confirm upstream.
    """
    main()
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module (the entry point above is named
    # `__SCREAMING_SNAKE_CASE`); running this file as a script would raise NameError.
    main()
| 273 | 0 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
lowercase__ : Optional[Any] = logging.getLogger(__name__)
def a__ ( ) -> str:
    """Pre-tokenize a text corpus for distillation and pickle the token-id sequences.

    Reads `--file_path` line by line, encodes each line as `<bos> text <sep>` with the
    selected tokenizer (bert / roberta / gpt2), shuffles, and dumps the list of id
    arrays to `<dump_file>.<tokenizer_name>.pickle`.

    NOTE(review): all locals were renamed to ``_UpperCamelCase`` by an automated
    rewrite; the body reads ``args``, ``tokenizer``, ``bos``, ``sep``, ``data``,
    ``rslt``, ``iter``, ``interval``, ``start``, ``end``, ``dp_file``, ``vocab_size``,
    ``rslt_``, which are never bound as written (``iter`` additionally shadows the
    builtin). ``np.uintaa`` / ``np.intaa`` are presumably ``np.uint16`` / ``np.int32``
    (16-bit ids when the vocabulary fits). Confirm against the original script.
    """
    _UpperCamelCase = argparse.ArgumentParser(
        description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
    parser.add_argument('''--file_path''', type=UpperCamelCase__, default='''data/dump.txt''', help='''The path to the data.''' )
    parser.add_argument('''--tokenizer_type''', type=UpperCamelCase__, default='''bert''', choices=['''bert''', '''roberta''', '''gpt2'''] )
    parser.add_argument('''--tokenizer_name''', type=UpperCamelCase__, default='''bert-base-uncased''', help='''The tokenizer to use.''' )
    parser.add_argument('''--dump_file''', type=UpperCamelCase__, default='''data/dump''', help='''The dump file prefix.''' )
    _UpperCamelCase = parser.parse_args()

    logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
    # Pick the tokenizer class and the special tokens that will bracket every line.
    if args.tokenizer_type == "bert":
        _UpperCamelCase = BertTokenizer.from_pretrained(args.tokenizer_name )
        _UpperCamelCase = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
        _UpperCamelCase = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
    elif args.tokenizer_type == "roberta":
        _UpperCamelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        _UpperCamelCase = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
        _UpperCamelCase = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
    elif args.tokenizer_type == "gpt2":
        _UpperCamelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name )
        _UpperCamelCase = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
        _UpperCamelCase = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`

    logger.info(F"""Loading text from {args.file_path}""" )
    with open(args.file_path, '''r''', encoding='''utf8''' ) as fp:
        _UpperCamelCase = fp.readlines()

    logger.info('''Start encoding''' )
    logger.info(F"""{len(UpperCamelCase__ )} examples to process.""" )

    _UpperCamelCase = []
    _UpperCamelCase = 0
    # Progress is logged every `interval` processed lines.
    _UpperCamelCase = 10000
    _UpperCamelCase = time.time()
    for text in data:
        _UpperCamelCase = F"""{bos} {text.strip()} {sep}"""
        _UpperCamelCase = tokenizer.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ )
        rslt.append(UpperCamelCase__ )

        iter += 1
        if iter % interval == 0:
            _UpperCamelCase = time.time()
            logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
            _UpperCamelCase = time.time()
    logger.info('''Finished binarization''' )
    logger.info(F"""{len(UpperCamelCase__ )} examples processed.""" )

    _UpperCamelCase = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
    _UpperCamelCase = tokenizer.vocab_size
    # Use 16-bit ids when the vocabulary fits, to halve the dump size.
    if vocab_size < (1 << 16):
        _UpperCamelCase = [np.uintaa(UpperCamelCase__ ) for d in rslt]
    else:
        _UpperCamelCase = [np.intaa(UpperCamelCase__ ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F"""Dump to {dp_file}""" )
    with open(UpperCamelCase__, '''wb''' ) as handle:
        pickle.dump(rslt_, UpperCamelCase__, protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
    # Fixed: the entry-point function in this module is named `a__`; the previous
    # `main()` call referenced an undefined name and raised NameError when the
    # script was executed directly.
    a__()
| 324 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class A_ :
    """Fixture that builds tiny MBart configs/inputs for the TF tests below.

    NOTE(review): methods declaring several ``_A`` parameters use duplicate argument
    names, which is a SyntaxError in Python — presumably the parameters had distinct
    names (batch_size, seq_length, ...) before an automated rename, as the bodies read
    those names. Both helper methods also share the name ``_lowercase``, so the second
    definition shadows the first. Confirm against the original TFMBartModelTester.
    """

    # Config class under test, extra config overrides, and the activation name.
    UpperCAmelCase__ = MBartConfig
    UpperCAmelCase__ = {}
    UpperCAmelCase__ = '''gelu'''

    def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=False , _A=9_9 , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A=0.1 , _A=0.1 , _A=2_0 , _A=2 , _A=1 , _A=0 , ):
        """Store the tiny-model hyper-parameters on the tester."""
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = seq_length
        UpperCAmelCase = is_training
        UpperCAmelCase = use_labels
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = eos_token_id
        UpperCAmelCase = pad_token_id
        UpperCAmelCase = bos_token_id

    def _lowercase ( self ):
        """Build a tiny MBart config plus a matching input dict."""
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        # Force every sequence to end with EOS, as MBart inputs do.
        UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )

        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        UpperCAmelCase = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        UpperCAmelCase = prepare_mbart_inputs_dict(_A , _A , _A )
        return config, inputs_dict

    def _lowercase ( self , _A , _A ):
        """Run the decoder with caching enabled on a one-sample slice."""
        UpperCAmelCase = TFMBartModel(config=_A ).get_decoder()
        UpperCAmelCase = inputs_dict['''input_ids''']

        # Take a one-sample slice (no padding) for the cached-decoding check.
        UpperCAmelCase = input_ids[:1, :]
        UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
        UpperCAmelCase = inputs_dict['''head_mask''']
        UpperCAmelCase = 1

        # first forward pass
        UpperCAmelCase = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A )

        UpperCAmelCase , UpperCAmelCase = outputs.to_tuple()
        UpperCAmelCase = past_key_values[1]
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ) -> List[str]:
    """Build the standard MBart input dict, defaulting any mask that was not supplied.

    NOTE(review): all eight parameters share the name ``UpperCamelCase__``, which is a
    SyntaxError (duplicate argument) — presumably they were config, input_ids,
    decoder_input_ids, attention_mask, decoder_attention_mask, head_mask,
    decoder_head_mask, cross_attn_head_mask before an automated rename; the body's
    reads of those names support that. ``tf.inta`` is presumably ``tf.int8``.
    Confirm against the original prepare_mbart_inputs_dict.
    """
    if attention_mask is None:
        # Encoder mask: 1 for real tokens, 0 for padding.
        UpperCAmelCase = tf.cast(tf.math.not_equal(UpperCamelCase__ , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        # Decoder mask: always attend to the first (start) token, mask pad elsewhere.
        UpperCAmelCase = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class A_ (a_ , a_ , unittest.TestCase ):
    """Common-test harness wiring TFMBart models into the shared TF model tests.

    NOTE(review): the base classes are ``a_`` placeholders (presumably
    TFModelTesterMixin and PipelineTesterMixin), all helpers share the name
    ``_lowercase`` (later defs shadow earlier ones), and the first method declares
    duplicate ``_A`` parameters (a SyntaxError) — evidently an automated rename of
    the original test class; confirm upstream.
    """

    # all_model_classes / all_generative_model_classes / pipeline_model_mapping,
    # followed by the usual is_encoder_decoder / test flags.
    UpperCAmelCase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    UpperCAmelCase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    UpperCAmelCase__ = (
        {
            '''conversational''': TFMBartForConditionalGeneration,
            '''feature-extraction''': TFMBartModel,
            '''summarization''': TFMBartForConditionalGeneration,
            '''text2text-generation''': TFMBartForConditionalGeneration,
            '''translation''': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase__ = True
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False

    def _lowercase ( self , _A , _A , _A , _A , _A ):
        """Skip every pipeline test except feature extraction (known layer-call failure)."""
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def _lowercase ( self ):
        """Create the model-tester and config-tester fixtures."""
        UpperCAmelCase = TFMBartModelTester(self )
        UpperCAmelCase = ConfigTester(self , config_class=_A )

    def _lowercase ( self ):
        """Run the generic config sanity checks."""
        self.config_tester.run_common_tests()

    def _lowercase ( self ):
        """Check decoder past-key-values behaviour on large inputs."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_A )
@require_sentencepiece
@require_tokenizers
@require_tf
class A_ (unittest.TestCase ):
    """Slow integration test: en→ro translation with facebook/mbart-large-en-ro.

    NOTE(review): all methods share the name ``_lowercase`` after an automated rename
    (so later definitions shadow earlier ones), and the bodies read ``self.tokenizer``
    / ``self.model`` / ``self.src_text`` / ``self.expected_text`` /
    ``self.translate_src_text`` — the apparent pre-rename property/attribute names;
    confirm upstream.
    """

    # Source sentence, expected Romanian translation, and the checkpoint name.
    UpperCAmelCase__ = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
    ]
    UpperCAmelCase__ = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
    ]
    UpperCAmelCase__ = '''facebook/mbart-large-en-ro'''

    @cached_property
    def _lowercase ( self ):
        """Tokenizer for the checkpoint (cached)."""
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def _lowercase ( self ):
        """Seq2seq model for the checkpoint (cached)."""
        UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def _lowercase ( self , **_A ):
        """Translate the source text and compare with the expected Romanian output."""
        UpperCAmelCase = self.translate_src_text(**_A )
        self.assertListEqual(self.expected_text , _A )

    def _lowercase ( self , **_A ):
        """Tokenize, generate with beam search (num_beams=2), and decode."""
        UpperCAmelCase = self.tokenizer(self.src_text , **_A , return_tensors='''tf''' )
        UpperCAmelCase = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        UpperCAmelCase = self.tokenizer.batch_decode(_A , skip_special_tokens=_A )
        return generated_words

    @slow
    def _lowercase ( self ):
        """End-to-end batch translation check (slow)."""
        self._assert_generated_batch_equal_expected()
| 273 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Declarative import structure for the lazy FocalNet module: the config is always
# importable; modeling symbols only when torch is installed.
lowerCamelCase_ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this rebinds `lowerCamelCase_`, overwriting the import-structure
    # dict above, and the `_import_structure` passed to `_LazyModule` below is never
    # defined in this module (NameError at import time) — presumably both were
    # `_import_structure` (with a keyed insertion here) before an automated rename;
    # confirm against the original transformers __init__.
    lowerCamelCase_ = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on attribute access.
    lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 244 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class A_ :
    """Fixture that builds tiny CTRL configs and inputs for the tests below.

    NOTE(review): several methods declare duplicate ``_A`` parameters (a SyntaxError —
    presumably distinct names before an automated rename), and every helper shares the
    name ``_lowercase`` so later definitions shadow earlier ones. The bodies read the
    apparent pre-rename locals (``model``, ``result``, ``config_and_inputs``, ...);
    confirm against the original CTRLModelTester.
    """

    def __init__( self , _A , _A=1_4 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=True , _A=9_9 , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=1_6 , _A=2 , _A=0.02 , _A=3 , _A=4 , _A=None , ):
        """Store the tiny-model hyper-parameters on the tester."""
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = seq_length
        UpperCAmelCase = is_training
        UpperCAmelCase = use_token_type_ids
        UpperCAmelCase = use_input_mask
        UpperCAmelCase = use_labels
        UpperCAmelCase = use_mc_token_ids
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = type_vocab_size
        UpperCAmelCase = type_sequence_label_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = num_labels
        UpperCAmelCase = num_choices
        UpperCAmelCase = scope
        # CTRL uses the last vocab id as the padding token.
        UpperCAmelCase = self.vocab_size - 1

    def _lowercase ( self ):
        """Create a config plus the full set of input tensors / optional labels."""
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        UpperCAmelCase = None
        if self.use_input_mask:
            UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )

        UpperCAmelCase = None
        if self.use_token_type_ids:
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        UpperCAmelCase = None
        if self.use_mc_token_ids:
            UpperCAmelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )

        UpperCAmelCase = None
        UpperCAmelCase = None
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )

        UpperCAmelCase = self.get_config()

        UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def _lowercase ( self ):
        """Build the tiny CTRLConfig."""
        return CTRLConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )

    def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
        """Forward the base CTRLModel and check hidden-state / cache shapes."""
        UpperCAmelCase = CTRLModel(config=_A )
        model.to(_A )
        model.eval()

        model(_A , token_type_ids=_A , head_mask=_A )
        model(_A , token_type_ids=_A )
        UpperCAmelCase = model(_A )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )

    def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
        """Forward the LM head model with labels and check loss/logits shapes."""
        UpperCAmelCase = CTRLLMHeadModel(_A )
        model.to(_A )
        model.eval()

        UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _lowercase ( self ):
        """Split prepare_config_and_inputs into (config, inputs_dict) for the common tests."""
        UpperCAmelCase = self.prepare_config_and_inputs()

        (
            (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) ,
        ) = config_and_inputs

        UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}

        return config, inputs_dict

    def _lowercase ( self , _A , _A , _A , _A , *_A ):
        """Forward the sequence-classification head and check the logits shape."""
        UpperCAmelCase = self.num_labels
        UpperCAmelCase = CTRLForSequenceClassification(_A )
        model.to(_A )
        model.eval()
        UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class A_ (a_ , a_ , a_ , unittest.TestCase ):
    """Common-test harness wiring CTRL models into the shared torch model tests.

    NOTE(review): the base classes are ``a_`` placeholders (presumably
    ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin) and all helpers
    share the name ``_lowercase`` (later defs shadow earlier ones; the originals were
    presumably setUp / tearDown / test_* methods) — an automated rename of the
    original test class; confirm upstream.
    """

    # all_model_classes / all_generative_model_classes / pipeline_model_mapping,
    # followed by the usual test flags.
    UpperCAmelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    UpperCAmelCase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
    UpperCAmelCase__ = (
        {
            '''feature-extraction''': CTRLModel,
            '''text-classification''': CTRLForSequenceClassification,
            '''text-generation''': CTRLLMHeadModel,
            '''zero-shot''': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCAmelCase__ = True
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False

    def _lowercase ( self , _A , _A , _A , _A , _A ):
        """Skip pipeline tests that cannot run with CTRL (no padding token)."""
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def _lowercase ( self ):
        """Create the model-tester and config-tester fixtures."""
        UpperCAmelCase = CTRLModelTester(self )
        UpperCAmelCase = ConfigTester(self , config_class=_A , n_embd=3_7 )

    def _lowercase ( self ):
        """Free as much GPU memory as possible after each test."""
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def _lowercase ( self ):
        """Run the generic config sanity checks."""
        self.config_tester.run_common_tests()

    def _lowercase ( self ):
        """Shape checks for the base CTRLModel."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*_A )

    def _lowercase ( self ):
        """Shape checks for the LM head model."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*_A )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def _lowercase ( self ):
        """Skipped: the common-test model is currently too large."""
        pass

    @slow
    def _lowercase ( self ):
        """Smoke-test loading the pretrained checkpoint(s)."""
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase = CTRLModel.from_pretrained(_A )
            self.assertIsNotNone(_A )

    @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
    def _lowercase ( self ):
        """Skipped: CTRL does not support left padding."""
        pass
@require_torch
class A_ (unittest.TestCase ):
    """Slow integration test: greedy generation from the pretrained `ctrl` checkpoint.

    NOTE(review): both methods are named ``_lowercase`` after an automated rename
    (presumably tearDown and test_lm_generate_ctrl), and the generation body reads
    ``model`` / ``input_ids`` / ``expected_output_ids`` / ``output_ids`` — the
    apparent pre-rename locals; confirm upstream.
    """

    def _lowercase ( self ):
        """Free as much GPU memory as possible after each test."""
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def _lowercase ( self ):
        """Generate greedily from a fixed prompt and compare with the known output ids."""
        UpperCAmelCase = CTRLLMHeadModel.from_pretrained('''ctrl''' )
        model.to(_A )
        UpperCAmelCase = torch.tensor(
            [[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=_A ) # Legal the president is
        UpperCAmelCase = [
            1_1_8_5_9,
            0,
            1_6_1_1,
            8,
            5,
            1_5_0,
            2_6_4_4_9,
            2,
            1_9,
            3_4_8,
            4_6_9,
            3,
            2_5_9_5,
            4_8,
            2_0_7_4_0,
            2_4_6_5_3_3,
            2_4_6_5_3_3,
            1_9,
            3_0,
            5,
        ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        UpperCAmelCase = model.generate(_A , do_sample=_A )
        self.assertListEqual(output_ids[0].tolist() , _A )
| 273 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Lazy-import structure for the CLIP sub-package: maps submodule name to the
# public names it exports. Backend-specific entries are appended below only
# when the corresponding backend is installed.
# BUGFIX: the previous version assigned each group to a throwaway variable,
# so `_import_structure` (consumed by `_LazyModule` below) was never defined.
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers and IDEs can resolve the names eagerly.
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends are imported only
    # on first attribute access. BUGFIX: the proxy must be installed into
    # sys.modules; previously it was assigned to a throwaway variable, leaving
    # the real (empty) module in place.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 240 |
import cva
import numpy as np
class A_ :
    """Harris corner detector over a greyscale image.

    The Harris response R = det(M) - k * trace(M)^2 is computed over a square
    summation window; pixels with R above a threshold are marked as corners.
    """

    def __init__( self , k , window_size ):
        """Validate and store detector parameters.

        :param k: Harris free parameter; only the conventional 0.04 / 0.06 accepted.
        :param window_size: side length of the square summation window.
        :raises ValueError: if ``k`` is not one of the accepted values.

        BUGFIX: the previous signature declared the same (mangled) name for both
        parameters, which is a SyntaxError; the names the body expects are restored.
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('''invalid k value''' )

    def __str__( self ):
        return str(self.k )

    def _lowercase ( self , _A ):
        """Detect corners in the greyscale image stored at path ``_A``.

        :return: (BGR image with corners painted red, list of [x, y, response]).
        """
        img = cva.imread(_A , 0 )
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        # np.gradient returns derivatives along (rows, cols) = (dy, dx).
        dy, dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        # BUGFIX: use the k validated in the constructor instead of a hard-coded
        # 0.04, so a detector built with k=0.06 actually behaves differently.
        k = self.k
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 2_5_5 )
        return color_img, corner_list
if __name__ == "__main__":
    # NOTE(review): `HarrisCorner`, `edge_detect` and `color_img` do not match
    # the names actually defined above (the class is `A_`, its detection method
    # is `_lowercase`, and results are bound to `__A`); as written this block
    # raises NameError — confirm the intended identifiers.
    __A : Tuple = HarrisCorner(0.04, 3)
    __A , __A : List[Any] = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
| 273 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
# Module-level logger for deprecation notices.
_lowerCAmelCase = logging.get_logger(__name__)
class A ( a_ ):
    '''Deprecated alias for ``YolosImageProcessor``: warns on construction and
    otherwise defers entirely to the parent image-processor class.'''
    def __init__(self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
        # NOTE(review): the `_A` arguments look like mangled references to
        # `FutureWarning` and the incoming *args/**kwargs — confirm; as
        # written they are undefined names.
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead." , _A , )
        super().__init__(*_A , **_A )
| 298 |
from datetime import datetime
import requests
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> bytes:
    """Resolve an Instagram video/IGTV page URL to its direct media URL via the
    downloadgram API and return the raw video bytes.

    :param UpperCamelCase__: the Instagram video/IGTV page URL.
    :return: the downloaded video content as bytes.

    BUGFIX: the previous body referenced undefined names (`base_url`, `url`)
    because every assignment targeted the same throwaway variable.
    """
    url = UpperCamelCase__
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    # The API returns a JSON list; the first entry's first source is the video.
    video_url = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
    return requests.get(video_url ).content
if __name__ == "__main__":
    # NOTE(review): `url`, `file_name` and `download_video` below do not match
    # the mangled names defined above (`__A`, `__SCREAMING_SNAKE_CASE`); as
    # written this block raises NameError — confirm the intended identifiers.
    __A : Union[str, Any] = input("Enter Video/IGTV url: ").strip()
    __A : Tuple = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(F'Done. Video saved to disk as {file_name}.')
| 273 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __lowerCamelCase (a_ ):
    '''Agent tool wrapping a BLIP captioning checkpoint: takes an image and
    returns an English description.'''
    _lowercase = """Salesforce/blip-image-captioning-base"""
    _lowercase = (
        """This is a tool that generates a description of an image. It takes an input named `image` which should be the """
        """image to caption, and returns a text that contains the description in English."""
    )
    _lowercase = """image_captioner"""
    _lowercase = AutoModelForVisionaSeq
    _lowercase = ["""image"""]
    _lowercase = ["""text"""]
    def __init__( self: Any,*A_: Tuple,**A_: Optional[int] ):
        '''Require the vision extra, then defer to the PipelineTool constructor.'''
        requires_backends(self,['vision'] )
        # NOTE(review): `_A` looks like a mangled reference to the incoming
        # *args/**kwargs — confirm; undefined as written.
        super().__init__(*_A,**_A )
    def snake_case_ ( self: Union[str, Any],A_: Dict ):
        '''Preprocess the input image into model-ready PyTorch tensors.'''
        return self.pre_processor(images=_A,return_tensors='pt' )
    def snake_case_ ( self: List[str],A_: Optional[Any] ):
        '''Run generation on the encoded inputs.'''
        return self.model.generate(**_A )
    def snake_case_ ( self: Dict,A_: List[str] ):
        '''Decode the generated ids into a stripped caption string.'''
        return self.pre_processor.batch_decode(_A,skip_special_tokens=_A )[0].strip()
| 310 |
from __future__ import annotations
from collections.abc import Callable
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 100 , ) -> float:
'''simple docstring'''
UpperCAmelCase = x_start
UpperCAmelCase = fnc(UpperCamelCase__ )
UpperCAmelCase = 0.0
for _ in range(UpperCamelCase__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
UpperCAmelCase = (x_end - x_start) / steps + xa
UpperCAmelCase = fnc(UpperCamelCase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
UpperCAmelCase = xa
UpperCAmelCase = fxa
return area
if __name__ == "__main__":

    def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str:
        '''Cubic-plus-quadratic test curve for the integrator demo.
        NOTE(review): the body uses ``x`` but the parameter is named
        ``UpperCamelCase__`` — undefined name as written; confirm.'''
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    __A : List[Any] = 10
    # NOTE(review): `i`, `f` and `trapezoidal_area` do not match the mangled
    # names defined above (`__A`, `__SCREAMING_SNAKE_CASE`); NameError as
    # written — confirm the intended identifiers.
    while i <= 100_000:
        print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
        i *= 10
| 273 | 0 |
from __future__ import annotations
import requests
def lowerCamelCase_ ( _a ):
    """Fetch a single Hacker News item by its numeric id and return its JSON dict.

    :param _a: the Hacker News story/item id.

    BUGFIX: the previous body interpolated and requested undefined names
    (`story_id`, `UpperCamelCase__`); it now uses the actual parameter.
    """
    url = f'https://hacker-news.firebaseio.com/v0/item/{_a}.json?print=pretty'
    return requests.get(url ).json()
def lowerCamelCase_ ( _a = 10 ):
    """Return the first ``_a`` top Hacker News stories as JSON dicts.

    :param _a: maximum number of stories to fetch (default 10).

    NOTE(review): relies on ``get_hackernews_story`` being resolvable at call
    time; it is not defined under that name in this file — confirm.

    BUGFIX: the previous body sliced with / requested undefined names
    (`UpperCamelCase__`); it now uses the actual parameter and loop variable.
    """
    url = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
    story_ids = requests.get(url ).json()[:_a]
    return [get_hackernews_story(story_id ) for story_id in story_ids]
def lowerCamelCase_ ( _a = 10 ):
    """Render the top ``_a`` Hacker News stories as a Markdown bullet list.

    :param _a: maximum number of stories to include (default 10).

    NOTE(review): relies on ``hackernews_top_stories`` being resolvable at
    call time; it is not defined under that name in this file — confirm.

    BUGFIX: the previous body formatted with an undefined name
    (`**UpperCamelCase__`); each ``story`` dict is now unpacked instead.
    """
    stories = hackernews_top_stories(_a )
    return "\n".join('''* [{title}]({url})'''.format(**story ) for story in stories )
if __name__ == "__main__":
    # NOTE(review): `hackernews_top_stories_as_markdown` is not defined in this
    # file (the markdown helper above is mangled to `lowerCamelCase_`) —
    # NameError as written; confirm the intended identifier.
    print(hackernews_top_stories_as_markdown())
| 131 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
# Module logger.
__A : Dict = logging.get_logger(__name__)
# NOTE(review): all four constants below are assigned to the SAME name `__A`,
# so each assignment discards the previous one, and the class body references
# names like VOCAB_FILES_NAMES that are never defined here — confirm the
# intended constant names (mangling artifact).
# File names expected inside a saved tokenizer directory.
__A : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
# Hub URLs for the vocab/tokenizer files of each published SqueezeBERT checkpoint.
__A : Tuple = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}
# Maximum input lengths (in tokens) per checkpoint.
__A : List[Any] = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}
# Per-checkpoint tokenizer init overrides.
__A : List[Any] = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class A_ (a_ ):
    '''Fast (tokenizers-backed) SqueezeBERT tokenizer.

    NOTE(review): the class attributes reference constants
    (VOCAB_FILES_NAMES, PRETRAINED_*) that this file defines only under the
    mangled name `__A`, and one method below declares the same parameter name
    `_A` twice, which is a SyntaxError — mangling artifacts; confirm intended
    names before use.'''
    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
    UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = SqueezeBertTokenizer
    def __init__( self , _A=None , _A=None , _A=True , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A=True , _A=None , **_A , ):
        '''Build the fast tokenizer and re-sync the backend normalizer when the
        passed lowercase/strip-accents/Chinese-chars options differ from the
        serialized normalizer state.'''
        super().__init__(
            _A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
        UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , _A ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , _A ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , _A ) != tokenize_chinese_chars
        ):
            # Rebuild the normalizer with the requested options.
            UpperCAmelCase = getattr(_A , normalizer_state.pop('''type''' ) )
            UpperCAmelCase = do_lower_case
            UpperCAmelCase = strip_accents
            UpperCAmelCase = tokenize_chinese_chars
            UpperCAmelCase = normalizer_class(**_A )
        UpperCAmelCase = do_lower_case
    def _lowercase ( self , _A , _A=None ):
        '''Add [CLS]/[SEP] special tokens around one (or a pair of) sequences.'''
        UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def _lowercase ( self , _A , _A = None ):
        '''Build token-type ids: 0s for the first sequence (+specials), 1s for
        the second.'''
        UpperCAmelCase = [self.sep_token_id]
        UpperCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def _lowercase ( self , _A , _A = None ):
        '''Save the backend tokenizer model files and return their paths.'''
        UpperCAmelCase = self._tokenizer.model.save(_A , name=_A )
        return tuple(_A )
| 273 | 0 |
def lowerCAmelCase__( lowercase : Any ) -> str:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise TypeError("\'float\' object cannot be interpreted as an integer" )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise TypeError("\'str\' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
__snake_case : Union[str, Any] = False
if num < 0:
__snake_case : Optional[int] = True
__snake_case : Union[str, Any] = -num
__snake_case : str = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(UpperCamelCase__ ) for e in binary )
return "0b" + "".join(str(UpperCamelCase__ ) for e in binary )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 326 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
# Mapping from T5X/Flax parameter-path fragments to their Transformers
# (SwitchTransformers) equivalents, applied during key renaming below.
__A : int = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[str]:
    '''Rewrite a flat T5X state-dict's keys into the Transformers layout and
    split stacked expert weights into per-expert entries.

    NOTE(review): many `UpperCamelCase__` references below look like mangled
    references to `key`/`new_key` locals and the MOE mapping is stored under
    the mangled name `__A`; additionally the final print uses nested double
    quotes inside an f-string (SyntaxError before Python 3.12) — confirm.
    '''
    UpperCAmelCase = list(s_dict.keys() )
    for key in keys:
        # 1. Layer index -> block/<n>/layer path.
        UpperCAmelCase = R'''.*/layers_(\d+)'''
        UpperCAmelCase = key
        if re.match(UpperCamelCase__ , UpperCamelCase__ ):
            UpperCAmelCase = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , UpperCamelCase__ )
        UpperCAmelCase = R'''(encoder|decoder)\/'''
        if re.match(UpperCamelCase__ , UpperCamelCase__ ):
            UpperCAmelCase = re.match(UpperCamelCase__ , UpperCamelCase__ ).groups()
            # Encoder MLP lives at layer 1, decoder MLP at layer 2.
            if groups[0] == "encoder":
                UpperCAmelCase = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , UpperCamelCase__ )
                UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , UpperCamelCase__ )
            elif groups[0] == "decoder":
                UpperCAmelCase = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , UpperCamelCase__ )
                UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , UpperCamelCase__ )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                UpperCAmelCase = new_key.replace(UpperCamelCase__ , UpperCamelCase__ )
        print(F"""{key} -> {new_key}""" )
        UpperCAmelCase = s_dict.pop(UpperCamelCase__ )
    # Relative-attention bias matrices must be transposed for PyTorch.
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        UpperCAmelCase = s_dict[
            '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        UpperCAmelCase = s_dict[
            '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            UpperCAmelCase = s_dict[key].shape[0]
            UpperCAmelCase = s_dict[key]
            for idx in range(UpperCamelCase__ ):
                UpperCAmelCase = expert_weihts[idx]
                print(F"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
            s_dict.pop(UpperCamelCase__ )
    return s_dict
# Mapping from gin-config parameter names to SwitchTransformersConfig fields,
# used when reconstructing a config from a gin file.
__A : Optional[int] = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
    '''Parse a gin config file into a SwitchTransformersConfig.

    NOTE(review): the signature repeats the same mangled parameter name twice
    (a SyntaxError) and the body references undefined names
    (`UpperCamelCase__` as file path/regex inputs, `num_experts`,
    `GIN_TO_CONFIG_MAPPING` stored here as `__A`) — mangling artifacts;
    confirm intended identifiers.
    '''
    import regex as re
    with open(UpperCamelCase__ , '''r''' ) as f:
        UpperCAmelCase = f.read()
    # Collect `NAME = value` pairs and map known ones onto config fields.
    UpperCAmelCase = re.findall(R'''(.*) = ([0-9.]*)''' , UpperCamelCase__ )
    UpperCAmelCase = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            UpperCAmelCase = float(UpperCamelCase__ ) if '''.''' in value else int(UpperCamelCase__ )
    # The activation tuple is stored separately, e.g. activations = ('gelu',).
    UpperCAmelCase = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , UpperCamelCase__ )[0]
    UpperCAmelCase = str(activation[1] )
    UpperCAmelCase = num_experts
    UpperCAmelCase = SwitchTransformersConfig(**UpperCamelCase__ )
    return config
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="./" , UpperCamelCase__=8 ) -> List[Any]:
    '''Load a T5X/Flax Switch-Transformers checkpoint, rename its parameters to
    the Transformers layout, and save a PyTorch model.

    NOTE(review): the signature repeats the same mangled parameter name five
    times (a SyntaxError) and the body references undefined locals
    (`flax_checkpoint_path`, `gin_file`, `pytorch_dump_path`, `pt_model`, ...)
    — mangling artifacts; confirm intended identifiers.
    '''
    print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
    UpperCAmelCase = checkpoints.load_tax_checkpoint(UpperCamelCase__ )
    if gin_file is not None:
        # Build the config from the gin file when one is provided...
        UpperCAmelCase = convert_gin_to_config(UpperCamelCase__ , UpperCamelCase__ )
    else:
        # ...otherwise load a named pretrained config.
        UpperCAmelCase = SwitchTransformersConfig.from_pretrained(UpperCamelCase__ )
    UpperCAmelCase = SwitchTransformersForConditionalGeneration(UpperCamelCase__ )
    UpperCAmelCase = flax_params['''target''']
    UpperCAmelCase = flatten_dict(UpperCamelCase__ , sep='''/''' )
    UpperCAmelCase = rename_keys(UpperCamelCase__ )
    UpperCAmelCase = unflatten_dict(UpperCamelCase__ , sep='''/''' )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(UpperCamelCase__ , UpperCamelCase__ )
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    pt_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
    # CLI entry point: parse paths/options and run the conversion.
    __A : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    __A : Tuple = parser.parse_args()
    # NOTE(review): `parser`/`args`/`convert_flax_checkpoint_to_pytorch` do not
    # match the mangled names defined above (`__A`, `__SCREAMING_SNAKE_CASE`),
    # and `args.switch_tax_checkpoint_path` does not match the declared flag
    # `--switch_t5x_checkpoint_path` — confirm intended identifiers.
    convert_flax_checkpoint_to_pytorch(
        args.switch_tax_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 273 | 0 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class snake_case ( a_, a_, a_ ):
    '''T5-style note-token encoder used by the Spectrogram Diffusion pipeline:
    token + position embeddings, a stack of T5 blocks, final layer norm.'''
    @register_to_config
    def __init__( self : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] = False , )-> Any:
        '''Build embeddings, the T5 encoder block stack, layer norm and dropout.
        NOTE(review): every `_A` argument below looks like a mangled reference
        to the (also mangled) constructor parameters — undefined as written;
        confirm the intended names.'''
        super().__init__()
        __lowerCAmelCase: Tuple = nn.Embedding(_A , _A)
        __lowerCAmelCase: Dict = nn.Embedding(_A , _A)
        # Position embeddings are fixed (not trained).
        __lowerCAmelCase: str = False
        __lowerCAmelCase: Union[str, Any] = nn.Dropout(p=_A)
        __lowerCAmelCase: Union[str, Any] = TaConfig(
            vocab_size=_A , d_model=_A , num_heads=_A , d_kv=_A , d_ff=_A , dropout_rate=_A , feed_forward_proj=_A , is_decoder=_A , is_encoder_decoder=_A , )
        __lowerCAmelCase: Any = nn.ModuleList()
        for lyr_num in range(_A):
            __lowerCAmelCase: Dict = TaBlock(_A)
            self.encoders.append(_A)
        __lowerCAmelCase: Optional[Any] = TaLayerNorm(_A)
        __lowerCAmelCase: Any = nn.Dropout(p=_A)
    def lowercase_ ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple)-> int:
        '''Encode input token ids: embed + add positions, run the block stack,
        normalize, and return (encodings, attention mask).'''
        __lowerCAmelCase: Dict = self.token_embedder(_A)
        __lowerCAmelCase: int = encoder_input_tokens.shape[1]
        __lowerCAmelCase: List[str] = torch.arange(_A , device=encoder_input_tokens.device)
        x += self.position_encoding(_A)
        __lowerCAmelCase: Union[str, Any] = self.dropout_pre(_A)
        # inverted the attention mask
        __lowerCAmelCase: Any = encoder_input_tokens.size()
        __lowerCAmelCase: List[Any] = self.get_extended_attention_mask(_A , _A)
        for lyr in self.encoders:
            __lowerCAmelCase: Dict = lyr(_A , _A)[0]
        __lowerCAmelCase: int = self.layer_norm(_A)
        return self.dropout_post(_A), encoder_inputs_mask
| 217 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
    '''Shared test mixin for DeepFloyd IF pipelines: builds tiny dummy
    components and checks save/load round-trips (with and without text
    encoder, with prompts pre-converted to embeddings).

    NOTE(review): the many `_A` arguments below look like mangled references
    to surrounding locals (device, kwargs dicts, tolerance flags, ...) —
    undefined as written; confirm intended names before relying on behavior.'''
    def _lowercase ( self ):
        '''Build the tiny superresolution-free dummy component set.'''
        torch.manual_seed(0 )
        UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
        torch.manual_seed(0 )
        UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
        torch.manual_seed(0 )
        UpperCAmelCase = UNetaDConditionModel(
            sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
                '''ResnetDownsampleBlock2D''',
                '''SimpleCrossAttnDownBlock2D''',
            ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
        torch.manual_seed(0 )
        UpperCAmelCase = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _lowercase ( self ):
        '''Build the tiny superresolution dummy component set (adds the image
        noising scheduler; unet takes 6 input channels).'''
        torch.manual_seed(0 )
        UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
        torch.manual_seed(0 )
        UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
        torch.manual_seed(0 )
        UpperCAmelCase = UNetaDConditionModel(
            sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
                '''ResnetDownsampleBlock2D''',
                '''SimpleCrossAttnDownBlock2D''',
            ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , )
        torch.manual_seed(0 )
        UpperCAmelCase = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _lowercase ( self ):
        '''Save/load round-trip with all optional components (text encoder,
        tokenizer) set to None: prompts are pre-encoded to embeddings, the
        pipeline is reloaded, and outputs must match within 1e-4.'''
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase = self.get_dummy_inputs(_A )
        UpperCAmelCase = inputs['''prompt''']
        UpperCAmelCase = inputs['''generator''']
        UpperCAmelCase = inputs['''num_inference_steps''']
        UpperCAmelCase = inputs['''output_type''']
        if "image" in inputs:
            UpperCAmelCase = inputs['''image''']
        else:
            UpperCAmelCase = None
        if "mask_image" in inputs:
            UpperCAmelCase = inputs['''mask_image''']
        else:
            UpperCAmelCase = None
        if "original_image" in inputs:
            UpperCAmelCase = inputs['''original_image''']
        else:
            UpperCAmelCase = None
        UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(_A )
        # inputs with prompt converted to embeddings
        UpperCAmelCase = {
            '''prompt_embeds''': prompt_embeds,
            '''negative_prompt_embeds''': negative_prompt_embeds,
            '''generator''': generator,
            '''num_inference_steps''': num_inference_steps,
            '''output_type''': output_type,
        }
        if image is not None:
            UpperCAmelCase = image
        if mask_image is not None:
            UpperCAmelCase = mask_image
        if original_image is not None:
            UpperCAmelCase = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(_A , _A , _A )
        UpperCAmelCase = pipe(**_A )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(_A )
            UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
        pipe_loaded.to(_A )
        pipe_loaded.set_progress_bar_config(disable=_A )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(_A , _A ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
        UpperCAmelCase = self.get_dummy_inputs(_A )
        UpperCAmelCase = inputs['''generator''']
        UpperCAmelCase = inputs['''num_inference_steps''']
        UpperCAmelCase = inputs['''output_type''']
        # inputs with prompt converted to embeddings
        UpperCAmelCase = {
            '''prompt_embeds''': prompt_embeds,
            '''negative_prompt_embeds''': negative_prompt_embeds,
            '''generator''': generator,
            '''num_inference_steps''': num_inference_steps,
            '''output_type''': output_type,
        }
        if image is not None:
            UpperCAmelCase = image
        if mask_image is not None:
            UpperCAmelCase = mask_image
        if original_image is not None:
            UpperCAmelCase = original_image
        UpperCAmelCase = pipe_loaded(**_A )[0]
        UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
        self.assertLess(_A , 1E-4 )
    def _lowercase ( self ):
        '''Plain save/load round-trip: reloaded pipeline output must match the
        original within 1e-4.'''
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase = self.get_dummy_inputs(_A )
        UpperCAmelCase = pipe(**_A )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(_A )
            UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
        pipe_loaded.to(_A )
        pipe_loaded.set_progress_bar_config(disable=_A )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        UpperCAmelCase = self.get_dummy_inputs(_A )
        UpperCAmelCase = pipe_loaded(**_A )[0]
        UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
        self.assertLess(_A , 1E-4 )
| 273 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
@dataclass
class __magic_name__ ( a_ ):
    """PyTorch-specific benchmark arguments: handles deprecated ``no_*`` flags,
    torchscript/fp16 options, and device/GPU-count discovery."""
    __UpperCamelCase = [
        '''no_inference''',
        '''no_cuda''',
        '''no_tpu''',
        '''no_speed''',
        '''no_memory''',
        '''no_env_print''',
        '''no_multi_process''',
    ]
    def __init__( self :Optional[Any] , **snake_case :int ):
        '''Translate deprecated ``no_X`` kwargs into their positive forms
        (with a warning), pop PyTorch-specific options, then defer to the
        base BenchmarkArguments constructor.'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                # Strip the "no_" prefix to recover the positive argument name.
                A_ : List[Any] = deprecated_arg[3:]
                setattr(self , _A , not kwargs.pop(_A ) )
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}" )
        A_ : Optional[Any] = kwargs.pop("torchscript" , self.torchscript )
        A_ : str = kwargs.pop("torch_xla_tpu_print_metrics" , self.torch_xla_tpu_print_metrics )
        A_ : Optional[Any] = kwargs.pop("fp16_opt_level" , self.fpaa_opt_level )
        super().__init__(**_A )
    __UpperCamelCase = field(default=a_ , metadata={'help': 'Trace the models using torchscript'} )
    __UpperCamelCase = field(default=a_ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
    __UpperCamelCase = field(
        default='O1' , metadata={
            'help': (
                'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
                'See details at https://nvidia.github.io/apex/amp.html'
            )
        } , )
    @cached_property
    def SCREAMING_SNAKE_CASE ( self :int ):
        '''Pick the execution device (CPU / TPU / CUDA) and count visible GPUs.'''
        requires_backends(self , ["torch"] )
        logger.info("PyTorch: setting up devices" )
        if not self.cuda:
            A_ : Optional[int] = torch.device("cpu" )
            A_ : Dict = 0
        elif is_torch_tpu_available():
            A_ : List[str] = xm.xla_device()
            A_ : Any = 0
        else:
            A_ : Optional[Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
            A_ : Dict = torch.cuda.device_count()
        return device, n_gpu
    @property
    def SCREAMING_SNAKE_CASE ( self :Any ):
        '''True when running on TPU and the tpu flag is set.'''
        return is_torch_tpu_available() and self.tpu
    @property
    def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
        '''Index of the current CUDA device.'''
        requires_backends(self , ["torch"] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def SCREAMING_SNAKE_CASE ( self :Any ):
        '''The torch device chosen by the setup step.'''
        requires_backends(self , ["torch"] )
        return self._setup_devices[0]
    @property
    def SCREAMING_SNAKE_CASE ( self :List[Any] ):
        '''Number of GPUs discovered by the setup step.'''
        requires_backends(self , ["torch"] )
        return self._setup_devices[1]
    @property
    def SCREAMING_SNAKE_CASE ( self :int ):
        '''True when at least one GPU is available.'''
        return self.n_gpu > 0
| 300 |
from __future__ import annotations
from collections import namedtuple
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> tuple:
'''simple docstring'''
UpperCAmelCase = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 273 | 0 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
lowerCAmelCase_ = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
lowerCAmelCase_ = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
lowerCAmelCase_ = "|".join(sys.argv[1:])
lowerCAmelCase_ = re.compile(RF'''^({joined_dirs}).*?\.py$''')
lowerCAmelCase_ = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 16 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# BUG FIX: the original rebound every structure to the one obfuscated name
# `__A` (clobbering the import map) and finally referenced an undefined
# `_import_structure`; the canonical lazy-import layout is restored below.
# Config and the slow tokenizer are always importable.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

# Optional submodules are registered only when their backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static analyzers see the real imports; at runtime they are lazy.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 | 0 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase_ ( tf_checkpoint_path , config_file , pytorch_dump_path ):
    """Convert a TensorFlow T5 checkpoint into a saved PyTorch model.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        config_file: JSON file describing the pre-trained T5 architecture.
        pytorch_dump_path: directory where the PyTorch model is saved.
    """
    # BUG FIX: the original declared the same parameter name three times (a
    # SyntaxError) and the body referenced an undefined `UpperCamelCase__`.
    config = TaConfig.from_json_file(config_file)
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path)

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 195 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if "model" in orig_key:
UpperCAmelCase = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
UpperCAmelCase = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
UpperCAmelCase = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
UpperCAmelCase = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
UpperCAmelCase = orig_key.split('''.''' )[0].split('''_''' )[-1]
UpperCAmelCase = orig_key.replace(F"""transformer_{layer_num}""" , F"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
UpperCAmelCase = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
UpperCAmelCase = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
UpperCAmelCase = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
UpperCAmelCase = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
UpperCAmelCase = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
UpperCAmelCase = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
UpperCAmelCase = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
UpperCAmelCase = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
UpperCAmelCase = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
UpperCAmelCase = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
UpperCAmelCase = '''yoso.''' + orig_key
return orig_key
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase = orig_state_dict.pop(UpperCamelCase__ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
UpperCAmelCase = val
UpperCAmelCase = orig_state_dict['''cls.predictions.decoder.bias''']
UpperCAmelCase = torch.arange(UpperCamelCase__ ).expand((1, -1) ) + 2
return orig_state_dict
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
    """Convert a pickled YOSO checkpoint into a saved HF ``YosoForMaskedLM``.

    NOTE(review): non-runnable as obfuscated — the signature repeats the same
    parameter name three times (a SyntaxError), the body reads results via a
    dead local `UpperCAmelCase`, and `convert_checkpoint_helper` is not
    defined anywhere in this module (the helper above was renamed to the same
    obfuscated name as this function).  The original identifiers —
    checkpoint path, config file, dump path — need restoring before use.
    """
    UpperCAmelCase = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model_state_dict''']
    UpperCAmelCase = YosoConfig.from_json_file(UpperCamelCase__ )
    UpperCAmelCase = YosoForMaskedLM(UpperCamelCase__ )
    UpperCAmelCase = convert_checkpoint_helper(config.max_position_embeddings , UpperCamelCase__ )
    print(model.load_state_dict(UpperCamelCase__ ) )
    model.eval()
    model.save_pretrained(UpperCamelCase__ )
    print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A : List[str] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 273 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def a__ ( tf_checkpoint_path: str, config_file: str, pytorch_dump_path: str, base_model: bool ) -> None:
    """Convert a TensorFlow Funnel checkpoint into a PyTorch state dict file.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        config_file: JSON config describing the pre-trained model.
        pytorch_dump_path: file where the converted state dict is written.
        base_model: if truthy, build the encoder-only ``FunnelBaseModel``
            instead of the full ``FunnelModel`` (no decoder).
    """
    # BUG FIX: the original declared the parameter name `lowercase` four
    # times (a SyntaxError) and the body referenced an undefined
    # `UpperCamelCase__`; real argument names are restored.
    config = FunnelConfig.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path )

    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict(), pytorch_dump_path )
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
lowercase__ : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 324 |
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
UpperCAmelCase = _modexpt(UpperCamelCase__ , exponent // 2 , UpperCamelCase__ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(UpperCamelCase__ , exponent - 1 , UpperCamelCase__ )) % modulo_value
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ = 1777 , UpperCamelCase__ = 1855 , UpperCamelCase__ = 8 ) -> int:
'''simple docstring'''
UpperCAmelCase = base
for _ in range(1 , UpperCamelCase__ ):
UpperCAmelCase = _modexpt(UpperCamelCase__ , UpperCamelCase__ , 10**digits )
return result
if __name__ == "__main__":
print(F'{solution() = }')
| 273 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level logger for this tokenizer file.
lowerCamelCase_ = logging.get_logger(__name__)
# NOTE(review): the three assignments below all rebind the single name
# `lowerCamelCase_` (a renaming artifact), so only the last survives; the
# class bodies below still reference `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`,
# which were presumably the original constant names.
lowerCamelCase_ = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
lowerCamelCase_ = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}
lowerCamelCase_ = {
    "abeja/gpt-neox-japanese-2.7b": 20_48,
}
def __magic_name__ ( __a : Union[str, Any] , __a : List[str] ):
'''simple docstring'''
with open(UpperCamelCase__ , """r""" , encoding="""utf-8""" ) as f:
UpperCamelCase__ = json.loads(f.read() )
UpperCamelCase__ = collections.OrderedDict()
UpperCamelCase__ = collections.OrderedDict()
UpperCamelCase__ = collections.OrderedDict()
with open(UpperCamelCase__ , """r""" , encoding="""utf-8""" ) as f:
UpperCamelCase__ = f.readlines()
UpperCamelCase__ = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token]
for idx, b in enumerate(UpperCamelCase__ ):
UpperCamelCase__ = b
UpperCamelCase__ = idx
for wd in b:
UpperCamelCase__ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class __A( a_ ):
    """Tokenizer for GPT-NeoX-Japanese: sub-word tokenization backed by a
    vocab/emoji pair loaded by the module-level loader above.

    NOTE(review): non-runnable as obfuscated — parameters are declared as
    `SCREAMING_SNAKE_CASE_` (duplicated, a SyntaxError) while bodies read
    `_A`, `vocab_file`, `emoji_file`, `index` and `token`, none of which are
    bound here; class constants reference `VOCAB_FILES_NAMES` etc., which are
    only defined in this module under the obfuscated name `lowerCamelCase_`.
    The docstrings below describe the evident intent.
    """
    SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ = ["""input_ids""", """attention_mask"""]
    def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="<|endoftext|>" , SCREAMING_SNAKE_CASE_="<|endoftext|>" , SCREAMING_SNAKE_CASE_="<|startoftext|>" , SCREAMING_SNAKE_CASE_="<|endoftext|>" , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ):
        """Validate the vocab/emoji files and build the sub-word tokenizer."""
        super().__init__(
            unk_token=_A , pad_token=_A , bos_token=_A , eos_token=_A , do_clean_text=_A , **_A , )
        if not os.path.isfile(_A ):
            raise ValueError(
                F"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                """ model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
        if not os.path.isfile(_A ):
            raise ValueError(
                F"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                """ pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
        UpperCamelCase__ = do_clean_text
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = load_vocab_and_emoji(_A , _A )
        UpperCamelCase__ = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def UpperCAmelCase_ (self ):
        """Vocabulary size: number of raw vocab lines."""
        return len(self.raw_vocab )
    def UpperCAmelCase_ (self ):
        """Return the full vocabulary including added tokens."""
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
        """Tokenize text via the sub-word tokenizer (optionally cleaned)."""
        return self.subword_tokenizer.tokenize(_A , clean=self.do_clean_text )
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
        """Map a token string to its id (falling back to the unk token)."""
        return self.vocab.get(_A , self.vocab.get(self.unk_token ) )
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
        """Map an id back to its token string."""
        return self.subword_tokenizer.convert_id_to_token(_A )
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
        """Join a token sequence into a single stripped string."""
        UpperCamelCase__ = """""".join(_A ).strip()
        return out_string
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
        """Encode a conversation's turns, EOS-terminated, truncated from the
        left to the model's maximum length."""
        UpperCamelCase__ = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(_A , add_special_tokens=_A ) + [self.eos_token_id] )
        if len(_A ) > self.model_max_length:
            UpperCamelCase__ = input_ids[-self.model_max_length :]
        return input_ids
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
        """Write the vocab and emoji files into `save_directory`, warning if
        vocabulary ids are not consecutive."""
        UpperCamelCase__ = 0
        if os.path.isdir(_A ):
            UpperCamelCase__ = os.path.join(
                _A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
            UpperCamelCase__ = os.path.join(
                _A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
        else:
            UpperCamelCase__ = (
                (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""]
            )
            UpperCamelCase__ = (
                (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""]
            )
        with open(_A , """w""" , encoding="""utf-8""" ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        """ Please check that the vocabulary is not corrupted!""" )
                    UpperCamelCase__ = token_index
                writer.write(""",""".join(_A ) + """\n""" )
                index += 1
        with open(_A , """w""" , encoding="""utf-8""" ) as writer:
            json.dump(self.emoji , _A )
        return vocab_file, emoji_file
class __A( a_ ):
    """Sub-word tokenizer for Japanese text used by GPT-NeoX-Japanese.

    Performs longest-match lookup against the vocabulary with special markers
    for spaces, line breaks, tabs, block characters, symbols and raw bytes.

    NOTE(review): non-runnable as obfuscated — parameters are declared as
    `SCREAMING_SNAKE_CASE_` while bodies read `_A`, `x`, `text`, `pos` etc.;
    in `__init__` all six content regexes are rebound to the single attribute
    `content_repattera`, so `clean_text` below would apply only the
    last-bound (price) pattern for every tag.  The original names were
    presumably `content_repatter1`..`content_repatter6`.
    """
    def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        """Store vocab/id maps and emoji table; precompile cleaning regexes
        (URL, e-mail, telephone, date, era date, price) and the translation
        table mapping box-drawing/block characters to <BLOCK>."""
        UpperCamelCase__ = vocab # same as swe
        UpperCamelCase__ = ids_to_tokens # same as bpe
        UpperCamelCase__ = emoji
        # Longest surface form in the vocab bounds the match window.
        UpperCamelCase__ = np.max([len(_A ) for w in self.vocab.keys()] )
        UpperCamelCase__ = re.compile(r"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
        UpperCamelCase__ = re.compile(r"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
        UpperCamelCase__ = re.compile(r"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
        UpperCamelCase__ = re.compile(
            r"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
        UpperCamelCase__ = re.compile(
            r"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
        UpperCamelCase__ = re.compile(
            r"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
        UpperCamelCase__ = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
        UpperCamelCase__ = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
        UpperCamelCase__ = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
    def __len__(self ):
        """Number of ids in the vocabulary."""
        return len(self.ids_to_tokens )
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
        """Replace URLs/e-mails/phone numbers/dates/prices with tag tokens and
        collapse runs of <BLOCK> markers."""
        UpperCamelCase__ = self.content_repattera.sub("""<URL>""" , _A )
        UpperCamelCase__ = self.content_repattera.sub("""<EMAIL>""" , _A )
        UpperCamelCase__ = self.content_repattera.sub("""<TEL>""" , _A )
        UpperCamelCase__ = self.content_repattera.sub("""<DATE>""" , _A )
        UpperCamelCase__ = self.content_repattera.sub("""<DATE>""" , _A )
        UpperCamelCase__ = self.content_repattera.sub("""<PRICE>""" , _A )
        UpperCamelCase__ = content.translate(self.content_transa )
        while "<BLOCK><BLOCK>" in content:
            UpperCamelCase__ = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
        return content
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
        """Tokenize text: normalize whitespace/emoji, optionally clean, then
        greedily match vocabulary entries (preferring the smallest token id),
        falling back to symbol/range/byte tokens for unmatched characters."""
        UpperCamelCase__ = text.replace(""" """ , """<SP>""" )
        UpperCamelCase__ = text.replace(""" """ , """<SP>""" )
        UpperCamelCase__ = text.replace("""\r\n""" , """<BR>""" )
        UpperCamelCase__ = text.replace("""\n""" , """<BR>""" )
        UpperCamelCase__ = text.replace("""\r""" , """<BR>""" )
        UpperCamelCase__ = text.replace("""\t""" , """<TAB>""" )
        UpperCamelCase__ = text.replace("""—""" , """ー""" )
        UpperCamelCase__ = text.replace("""−""" , """ー""" )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                UpperCamelCase__ = text.replace(_A , _A )
        if clean:
            UpperCamelCase__ = self.clean_text(_A )
        def check_simbol(SCREAMING_SNAKE_CASE_ ):
            """True for 2-byte UTF-8 sequences in the symbol ranges below."""
            UpperCamelCase__ = x.encode()
            if len(_A ) == 1 and len(_A ) == 2:
                UpperCamelCase__ = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0Xc_2_a_1 and c <= 0Xc_2_b_f)
                    or (c >= 0Xc_7_8_0 and c <= 0Xc_7_8_3)
                    or (c >= 0Xc_a_b_9 and c <= 0Xc_b_b_f)
                    or (c >= 0Xc_c_8_0 and c <= 0Xc_d_a_2)
                ):
                    return True
            return False
        def checkuae(SCREAMING_SNAKE_CASE_ ):
            """True for 3-byte UTF-8 sequences in the U+2000–U+2BFF block."""
            UpperCamelCase__ = x.encode()
            if len(_A ) == 1 and len(_A ) == 3:
                UpperCamelCase__ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0Xe_2_8_0_8_0 and c <= 0Xe_2_b_0_7_f:
                    return True
            return False
        UpperCamelCase__ = 0
        UpperCamelCase__ = []
        while pos < len(_A ):
            # Tag tokens (starting with "<") may span up to maxlen characters;
            # anything else is matched within a 3-character window.
            UpperCamelCase__ = min(len(_A ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
            UpperCamelCase__ = [] # (token_id, token, pos)
            for e in range(_A , _A , -1 ):
                UpperCamelCase__ = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(_A ) > 2:
                        UpperCamelCase__ = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(_A ) > 0:
                # the smallest token_id is adopted
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = sorted(_A , key=lambda SCREAMING_SNAKE_CASE_ : x[0] )[0]
                result.append(_A )
                UpperCamelCase__ = e
            else:
                UpperCamelCase__ = pos + 1
                UpperCamelCase__ = text[pos:end]
                if check_simbol(_A ):
                    result.append("""<KIGOU>""" )
                elif checkuae(_A ):
                    result.append("""<U2000U2BFF>""" )
                else:
                    for i in wd.encode("""utf-8""" ):
                        result.append("""<|byte%d|>""" % i )
                UpperCamelCase__ = end
        return result
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="\n" ):
        """Convert an id back to text, decoding accumulated byte tokens and
        expanding the special <SP>/<BR>/<TAB>/<BLOCK>/emoji markers."""
        UpperCamelCase__ = []
        UpperCamelCase__ = []
        UpperCamelCase__ = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(_A ) > 0:
                words.append(bytearray(_A ).decode("""utf-8""" , errors="""replace""" ) )
                UpperCamelCase__ = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["""emoji_inv"""][word] )
            elif word == "<SP>":
                words.append(""" """ )
            elif word == "<BR>":
                words.append(_A )
            elif word == "<TAB>":
                words.append("""\t""" )
            elif word == "<BLOCK>":
                words.append("""▀""" )
            elif word == "<KIGOU>":
                words.append("""ǀ""" )
            elif word == "<U2000U2BFF>":
                words.append("""‖""" )
            else:
                words.append(_A )
        if len(_A ) > 0:
            words.append(bytearray(_A ).decode("""utf-8""" , errors="""replace""" ) )
        UpperCamelCase__ = """""".join(_A )
        return text
| 244 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
# NOTE(review): the logger and the pretrained-config map below are both
# bound to the single obfuscated name `__A`, so the second assignment
# overwrites the first; the map presumably was
# `LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP` originally.
__A : Dict = logging.get_logger(__name__)
__A : str = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}
class A_ (a_ ):
    """Configuration for a Longformer model.

    NOTE(review): non-runnable as obfuscated — `__init__` declares the same
    parameter name `_A` eighteen times (a SyntaxError) while the body reads
    `attention_window`, `vocab_size`, etc.; the constructor's job is to store
    those hyperparameters as attributes.
    """
    UpperCAmelCase__ = '''longformer'''
    def __init__( self , _A = 5_1_2 , _A = 2 , _A = 1 , _A = 0 , _A = 2 , _A = 3_0_5_2_2 , _A = 7_6_8 , _A = 1_2 , _A = 1_2 , _A = 3_0_7_2 , _A = "gelu" , _A = 0.1 , _A = 0.1 , _A = 5_1_2 , _A = 2 , _A = 0.02 , _A = 1E-12 , _A = False , **_A , ):
        """Store Longformer hyperparameters (attention window, token ids,
        transformer sizes, dropout/init settings, ONNX-export flag)."""
        super().__init__(pad_token_id=_A , **_A )
        UpperCAmelCase = attention_window
        UpperCAmelCase = sep_token_id
        UpperCAmelCase = bos_token_id
        UpperCAmelCase = eos_token_id
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = hidden_act
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = type_vocab_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = layer_norm_eps
        UpperCAmelCase = onnx_export
class A_ (a_ ):
    """ONNX export configuration for Longformer.

    Adds the `global_attention_mask` input, restricts default-task outputs,
    and generates dummy inputs with alternating global attention.
    """
    def __init__( self , _A , _A = "default" , _A = None ):
        """Initialize the base ONNX config and force ONNX-export mode.

        NOTE(review): both positional parameters are declared `_A`
        (a duplicate — SyntaxError in the obfuscated form).
        """
        super().__init__(_A , _A , _A )
        UpperCAmelCase = True
    @property
    def _lowercase ( self ):
        """ONNX input spec: input ids, attention mask and the Longformer
        global attention mask, with batch/sequence (or choice) dynamic axes."""
        if self.task == "multiple-choice":
            UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''global_attention_mask''', dynamic_axis),
            ] )
    @property
    def _lowercase ( self ):
        """ONNX output spec; the default task keeps only the batch axis
        dynamic on the pooled output."""
        UpperCAmelCase = super().outputs
        if self.task == "default":
            UpperCAmelCase = {0: '''batch'''}
        return outputs
    @property
    def _lowercase ( self ):
        """Absolute tolerance used when validating the exported model."""
        return 1E-4
    @property
    def _lowercase ( self ):
        """Minimum ONNX opset (>= 14; Longformer needs tril/triu support)."""
        return max(super().default_onnx_opset , 1_4 )
    def _lowercase ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
        """Build dummy inputs and add a global attention mask with every
        second token marked global."""
        UpperCAmelCase = super().generate_dummy_inputs(
            preprocessor=_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        UpperCAmelCase = torch.zeros_like(inputs['''input_ids'''] )
        # make every second token global
        UpperCAmelCase = 1
        return inputs
| 273 | 0 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
# NOTE(review): the four assignments below all rebind the single name
# `snake_case` (a renaming artifact), so only the last survives; the class
# below still references `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`
# and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`, the presumed original names.
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
snake_case : Any = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}
snake_case : int = {
    "allenai/led-base-16384": 1_63_84,
}
class snake_case_ (a_ ):
UpperCAmelCase__ : Any = VOCAB_FILES_NAMES
UpperCAmelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Any = LEDTokenizer
UpperCAmelCase__ : Any = ['''input_ids''', '''attention_mask''']
def __init__( self :Optional[int] ,__snake_case :str=None ,__snake_case :str=None ,__snake_case :Dict=None ,__snake_case :Optional[Any]="replace" ,__snake_case :Dict="<s>" ,__snake_case :Dict="</s>" ,__snake_case :Dict="</s>" ,__snake_case :Optional[int]="<s>" ,__snake_case :List[str]="<unk>" ,__snake_case :Union[str, Any]="<pad>" ,__snake_case :int="<mask>" ,__snake_case :List[str]=False ,__snake_case :List[str]=True ,**__snake_case :Any ,) -> Optional[int]:
super().__init__(
_A ,_A ,tokenizer_file=_A ,errors=_A ,bos_token=_A ,eos_token=_A ,sep_token=_A ,cls_token=_A ,unk_token=_A ,pad_token=_A ,mask_token=_A ,add_prefix_space=_A ,trim_offsets=_A ,**_A ,)
a__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' ,_A ) != add_prefix_space:
a__ = getattr(_A ,pre_tok_state.pop('type' ) )
a__ = add_prefix_space
a__ = pre_tok_class(**_A )
a__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
a__ = 'post_processor'
a__ = getattr(self.backend_tokenizer ,_A ,_A )
if tokenizer_component_instance:
a__ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
a__ = tuple(state['sep'] )
if "cls" in state:
a__ = tuple(state['cls'] )
a__ = False
if state.get('add_prefix_space' ,_A ) != add_prefix_space:
a__ = add_prefix_space
a__ = True
if state.get('trim_offsets' ,_A ) != trim_offsets:
a__ = trim_offsets
a__ = True
if changes_to_apply:
a__ = getattr(_A ,state.pop('type' ) )
a__ = component_class(**_A )
setattr(self.backend_tokenizer ,_A ,_A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCamelCase__( self :List[str] ) -> List[Any]:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase__( self :Dict ,__snake_case :List[Any] ) -> str:
a__ = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else value
a__ = value
def lowerCamelCase__( self :Tuple ,*__snake_case :List[str] ,**__snake_case :List[Any] ) -> Tuple:
a__ = kwargs.get('is_split_into_words' ,_A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*_A ,**_A )
def lowerCamelCase__( self :str ,*__snake_case :Dict ,**__snake_case :Tuple ) -> str:
a__ = kwargs.get('is_split_into_words' ,_A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.' )
return super()._encode_plus(*_A ,**_A )
def lowerCamelCase__( self :List[Any] ,__snake_case :Optional[Any] ,__snake_case :Optional[Any] = None ) -> Dict:
a__ = self._tokenizer.model.save(_A ,name=_A )
return tuple(_A )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Tuple ,__snake_case :int=None ) -> Any:
a__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase__( self :int ,__snake_case :Tuple ,__snake_case :Tuple = None ) -> Optional[int]:
a__ = [self.sep_token_id]
a__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__( self :Optional[Any] ,encoded_inputs :List[str] ,max_length :List[str] = None ,padding_strategy :Any = PaddingStrategy.DO_NOT_PAD ,pad_to_multiple_of :Optional[int] = None ,return_attention_mask :Optional[int] = None ,) -> List[Any]:
    """Pad via the parent implementation, then extend any
    ``global_attention_mask`` to the padded sequence length.

    Fixes vs. the previous version: all five parameters shared one name (a
    SyntaxError) and every result — including the extended global attention
    mask — was bound to a throwaway local instead of being written back.
    """
    encoded_inputs = super()._pad(
        encoded_inputs=encoded_inputs ,max_length=max_length ,padding_strategy=padding_strategy ,pad_to_multiple_of=pad_to_multiple_of ,return_attention_mask=return_attention_mask ,)
    # Load from model defaults
    if return_attention_mask is None:
        return_attention_mask = 'attention_mask' in self.model_input_names
    if return_attention_mask and "global_attention_mask" in encoded_inputs:
        required_input = encoded_inputs[self.model_input_names[0]]
        # `global_attention_mask` need to have the same length as other (sequential) inputs.
        needs_to_be_padded = len(encoded_inputs['global_attention_mask'] ) != len(required_input )
        if needs_to_be_padded:
            difference = len(required_input ) - len(encoded_inputs['global_attention_mask'] )
            if self.padding_side == "right":
                # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                encoded_inputs['global_attention_mask'] = (
                    encoded_inputs['global_attention_mask'] + [-1] * difference
                )
            elif self.padding_side == "left":
                encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                    'global_attention_mask'
                ]
            else:
                raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
    return encoded_inputs
| 240 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A_ (a_ ):
    """Unconditional image-generation pipeline using the stochastic sampler
    of Karras et al. (2022) ("KarrasVe").

    Fixes vs. the previous version: ``__init__``/``__call__`` declared every
    parameter with the same name ``_A`` (a SyntaxError), each intermediate
    was bound to the same throwaway local so results were lost, and the two
    class attributes were meaningless ``42`` placeholders.
    """

    # Components registered in __init__ via register_modules (string
    # annotations so nothing is evaluated at class-creation time).
    unet: "UNetaDModel"
    scheduler: "KarrasVeScheduler"

    def __init__( self , unet , scheduler ):
        """Register the denoising UNet and the Karras-Ve scheduler."""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 5_0 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ):
        """Generate ``batch_size`` images; returns ``ImagePipelineOutput``
        (or a 1-tuple of images when ``return_dict=False``)."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape , generator=generator , device=self.device ) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat , sigma_hat = self.scheduler.add_noise_to_input(sample , sigma , generator=generator )
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output , sigma_hat , sigma_prev , sample_hat )
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                step_output = self.scheduler.step_correct(
                    model_output , sigma_hat , sigma_prev , sample_hat , step_output.prev_sample , step_output['derivative'] , )
            sample = step_output.prev_sample
        # Map from [-1, 1] to [0, 1] and move channels last for image output.
        sample = (sample / 2 + 0.5).clamp(0 , 1 )
        image = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 273 | 0 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def __lowerCAmelCase ( model_name ):
    """Build a ``MaskFormerConfig`` (Swin-tiny backbone) for `model_name`,
    including ``num_labels`` and the id/label maps for its dataset.

    Fixes vs. the previous version: the parameter was never referenced (the
    body used the undefined name ``model_name``), every intermediate was
    bound to the same throwaway local so ``num_labels`` and the label
    filename were lost, and ``idalabel`` was read before being defined.
    """
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def __lowerCAmelCase ( config ):
    """Return ``(old_key, new_key)`` pairs mapping original MaskFormer
    checkpoint names to Transformers names.

    Fixes vs. the previous version: the accumulator was bound to a throwaway
    local while the code appended to the undefined name ``rename_keys``, and
    the parameter name did not match its uses (``config``) in the body.
    """
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
    rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
    rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
            rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
            rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
            rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
            rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
            rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
            rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
            rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
            rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
            rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
            rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
            rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
        if i < 3:
            rename_keys.append((F"backbone.layers.{i}.downsample.reduction.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
            rename_keys.append((F"backbone.layers.{i}.downsample.norm.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
            rename_keys.append((F"backbone.layers.{i}.downsample.norm.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
        rename_keys.append((F"backbone.norm{i}.weight", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
        rename_keys.append((F"backbone.norm{i}.bias", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
    # FPN
    rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
    rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
    rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
    for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
        rename_keys.append((F"sem_seg_head.adapter_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
        rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
        rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
        rename_keys.append((F"sem_seg_head.layer_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
        rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
        rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
    rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
    rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
    # Transformer decoder
    for idx in range(config.decoder_config.decoder_layers ):
        # self-attention out projection
        rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
        rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
        # cross-attention out projection
        rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
        rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
        # MLP 1
        rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", F"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
        rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", F"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
        # MLP 2
        rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", F"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
        rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", F"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
        # layernorm 1 (self-attention layernorm)
        rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
        rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
        # layernorm 2 (cross-attention layernorm)
        rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
        rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
        # layernorm 3 (final layernorm)
        rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
        rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
    # heads on top
    rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
    rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
    rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
    rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
    rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
    for i in range(3 ):
        rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.weight", F"mask_embedder.{i}.0.weight") )
        rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.bias", F"mask_embedder.{i}.0.bias") )
    # fmt: on
    return rename_keys
def __lowerCAmelCase ( dct , old , new ):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    Fixes vs. the previous version: all three parameters shared the same
    name (a SyntaxError) and the popped value was assigned to a throwaway
    local instead of being stored under the new key.
    """
    val = dct.pop(old )
    dct[new] = val
def __lowerCAmelCase ( state_dict , backbone_config ):
    """Split each fused Swin ``qkv`` projection into separate query/key/value
    tensors under the HF naming scheme (mutates `state_dict` in place).

    Fixes vs. the previous version: both parameters shared one name (a
    SyntaxError) and every slice was assigned to the same throwaway local,
    so nothing was ever written back into ``state_dict``.
    NOTE(review): target key names follow the standard Swin conversion
    layout — confirm against the reference conversion script.
    """
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
            in_proj_bias = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            prefix = F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self"
            state_dict[F"{prefix}.query.weight"] = in_proj_weight[:dim, :]
            state_dict[F"{prefix}.query.bias"] = in_proj_bias[:dim]
            state_dict[F"{prefix}.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[F"{prefix}.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[F"{prefix}.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[F"{prefix}.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def __lowerCAmelCase ( state_dict , config ):
    """Split the fused decoder self-/cross-attention ``in_proj`` tensors into
    q/k/v projections under the HF naming scheme (mutates `state_dict`).

    Fixes vs. the previous version: both parameters shared one name (a
    SyntaxError) and every slice was assigned to the same throwaway local,
    so nothing was written back into ``state_dict``.
    NOTE(review): target key names follow the standard MaskFormer decoder
    layout — confirm against the reference conversion script.
    """
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        prefix = F"model.transformer_module.decoder.layers.{idx}.self_attn"
        state_dict[F"{prefix}.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[F"{prefix}.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[F"{prefix}.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"{prefix}.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"{prefix}.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[F"{prefix}.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        prefix = F"model.transformer_module.decoder.layers.{idx}.encoder_attn"
        state_dict[F"{prefix}.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[F"{prefix}.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[F"{prefix}.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"{prefix}.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"{prefix}.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[F"{prefix}.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def __lowerCAmelCase ( ):
    """Download and return the standard COCO "two cats" verification image.

    Fixes vs. the previous version: the body referenced the undefined name
    ``UpperCamelCase__`` both as the URL and as the ``stream`` flag.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read directly from the response's raw stream.
    return Image.open(requests.get(url , stream=True ).raw )
@torch.no_grad()
def __lowerCAmelCase ( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    """Convert an original MaskFormer pickle checkpoint to HF format, verify
    its logits on a test image, and optionally save / push it.

    Fixes vs. the previous version: all four parameters shared one name (a
    SyntaxError) and every intermediate was bound to the same throwaway
    local, leaving names such as ``missing_keys``/``unexpected_keys``
    undefined and discarding the tensor conversion results.
    NOTE(review): this calls ``get_maskformer_config``/``create_rename_keys``/
    ``rename_key``/``read_in_swin_q_k_v``/``read_in_decoder_q_k_v``/
    ``prepare_img`` — confirm the helpers above are exposed under these names.
    """
    config = get_maskformer_config(model_name )
    # load original state_dict
    # NOTE: pickle.load executes arbitrary code — only convert trusted checkpoints.
    with open(checkpoint_path , "rb" ) as f:
        data = pickle.load(f )
    state_dict = data["model"]
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    # Only the encoder's final layernorm is expected to be missing.
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, F"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors="pt" )
    outputs = model(**inputs )
    print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1E-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(F"Saving model and image processor to {pytorch_dump_folder_path}" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model and image processor to the hub..." )
        model.push_to_hub(F"nielsr/{model_name}" )
        image_processor.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # Fixes vs. the previous version: the parser / parsed args were bound to
    # `_lowerCAmelCase` while the code used the undefined names `parser` /
    # `args`, and the first `help` value was wrapped in a 1-tuple.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help='Name of the MaskFormer model you\'d like to convert',
    )
    parser.add_argument(
        '--checkpoint_path',
        default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
        type=str,
        help='Path to the original state dict (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    # NOTE(review): the conversion entry point is defined above under an
    # obfuscated name — confirm it is exposed as `convert_maskformer_checkpoint`.
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 298 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
# Module-level RNG shared by the synthetic-input helpers.
# Fixes vs. the previous version: the annotation claimed `str` for a
# `random.Random` instance, and the helper below refers to this object as
# `global_rng`, which was never defined — expose it under that name too.
__A : random.Random = random.Random()
global_rng = __A
def __SCREAMING_SNAKE_CASE ( shape , scale=1.0 , rng=None , name=None ) -> Tuple:
    """Create a ``shape[0] x shape[1]`` nested list of random floats in
    ``[0, scale)``.

    Fixes vs. the previous version: all four parameters shared one name (a
    SyntaxError) and the ``rng`` fallback was bound to a throwaway local,
    so ``rng.random()`` would have raised on the default ``None``.
    """
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
class A_ (unittest.TestCase ):
    """Helper that builds feature-extractor kwargs and synthetic waveform /
    spectrogram inputs for the SpeechT5 feature-extraction tests.

    Fixes vs. the previous version: ``__init__`` declared every parameter
    as ``_A`` (a SyntaxError) and stored nothing on ``self`` even though
    every method reads ``self.*``; all three methods shared the name
    ``_lowercase`` so only the last survived.  Method names now match their
    call sites in the test class below (``prepare_feat_extract_dict``,
    ``prepare_inputs_for_target``).
    """

    def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , feature_size=1 , padding_value=0.0 , sampling_rate=1_6_0_0_0 , do_normalize=True , num_mel_bins=8_0 , hop_length=1_6 , win_length=6_4 , win_function="hann_window" , fmin=8_0 , fmax=7_6_0_0 , mel_floor=1E-10 , return_attention_mask=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Per-example length increment so batch lengths span min..max.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict( self ):
        """Kwargs dict for constructing the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        """Synthetic 1-D waveform inputs (increasing lengths unless
        ``equal_length``).  NOTE(review): name assumed from the usual tester
        convention — no call site is visible in this chunk; confirm."""
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs

    def prepare_inputs_for_target( self , equal_length=False , numpify=False ):
        """Synthetic (length, num_mel_bins) spectrogram targets."""
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
class A_ (a_ , unittest.TestCase ):
UpperCAmelCase__ = SpeechTaFeatureExtractor
def setUp( self ):
    """Attach the input-building helper read by every test below as
    ``self.feat_extract_tester``.

    Fixes vs. the previous version: the method was named ``_lowercase``
    (so unittest never ran it) and bound the tester to a throwaway local.
    NOTE(review): ``SpeechTaFeatureExtractionTester`` must be the helper
    class defined above — confirm it is exposed under that name.
    """
    self.feat_extract_tester = SpeechTaFeatureExtractionTester(self )
def _check_zero_mean_unit_variance( self , input_vector ):
    """Assert `input_vector` is ~zero-mean and ~unit-variance along axis 0.

    Renamed from the colliding ``_lowercase`` to match its call sites in the
    normalization tests below.
    """
    self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1E-3 ) )
    self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1E-3 ) )
def test_call( self ):
    """Encoding a python list and its numpy equivalent must match, both for
    single inputs and for batches.

    Fixes vs. the previous version: every local was bound to the same
    throwaway name and comparisons referenced the undefined ``_A``.
    """
    feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    # create three inputs of length 800, 1000, and 1200
    speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
    np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
    # Test not batched input
    encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
    encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
    self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
    # Test batched
    encoded_sequences_1 = feat_extract(speech_inputs , return_tensors='np' ).input_values
    encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors='np' ).input_values
    for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
        self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
def test_zero_mean_unit_variance_normalization_np( self ):
    """Normalization must hold for each padding strategy when returning
    numpy tensors; padded tails must stay (near) zero.

    Fixes vs. the previous version: the padding/max-length lists and the
    processed output were all bound to one throwaway local, and the loop
    zipped the undefined ``_A``.
    """
    feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
    paddings = ['longest', 'max_length', 'do_not_pad']
    max_lengths = [None, 1_6_0_0, None]
    for max_length, padding in zip(max_lengths , paddings ):
        processed = feat_extract(speech_inputs , padding=padding , max_length=max_length , return_tensors='np' )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
        self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
        self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
        self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
        self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def test_zero_mean_unit_variance_normalization( self ):
    """Normalization must hold for each padding strategy with the default
    (python list) return type.

    Fixes vs. the previous version: all locals were bound to the same
    throwaway name and the loop zipped the undefined ``_A``.
    """
    feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    lengths = range(8_0_0 , 1_4_0_0 , 2_0_0 )
    speech_inputs = [floats_list((1, x) )[0] for x in lengths]
    paddings = ['longest', 'max_length', 'do_not_pad']
    max_lengths = [None, 1_6_0_0, None]
    for max_length, padding in zip(max_lengths , paddings ):
        processed = feat_extract(speech_inputs , max_length=max_length , padding=padding )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def test_zero_mean_unit_variance_normalization_trunc_np_max_length( self ):
    """Normalization must hold when truncating to ``max_length`` with
    ``padding='max_length'``.

    Fixes vs. the previous version: the truncation flag was the undefined
    placeholder ``_A`` (restored to ``True``) and all locals were clobbered.
    """
    feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
    processed = feat_extract(
        speech_inputs , truncation=True , max_length=1_0_0_0 , padding='max_length' , return_tensors='np' )
    input_values = processed.input_values
    self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
    self._check_zero_mean_unit_variance(input_values[1] )
    self._check_zero_mean_unit_variance(input_values[2] )
def test_zero_mean_unit_variance_normalization_trunc_np_longest( self ):
    """With ``padding='longest'``, output length is min(max_length, longest)
    when truncating, and the longest input when ``max_length`` exceeds it.

    Fixes vs. the previous version: the truncation flag was the undefined
    placeholder ``_A`` (restored to ``True``) and all locals were clobbered.
    """
    feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
    processed = feat_extract(
        speech_inputs , truncation=True , max_length=1_0_0_0 , padding='longest' , return_tensors='np' )
    input_values = processed.input_values
    self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
    self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
    self._check_zero_mean_unit_variance(input_values[2] )
    # make sure that if max_length < longest -> then pad to max_length
    self.assertTrue(input_values.shape == (3, 1_0_0_0) )
    speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
    processed = feat_extract(
        speech_inputs , truncation=True , max_length=2_0_0_0 , padding='longest' , return_tensors='np' )
    input_values = processed.input_values
    self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
    self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
    self._check_zero_mean_unit_variance(input_values[2] )
    # make sure that if max_length > longest -> then pad to longest
    self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def test_double_precision_pad( self ):
    """float64 inputs must come back as float32 from ``pad`` for both numpy
    and torch return types.

    Fixes vs. the previous version: the extractor/input locals were
    clobbered and the dtypes were the nonexistent ``np.floataa`` —
    NOTE(review): restored to float64 input / float32 output per the usual
    double-precision-pad test; confirm.
    """
    feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    np_speech_inputs = np.random.rand(1_0_0 ).astype(np.float64 )
    py_speech_inputs = np_speech_inputs.tolist()
    for inputs in [py_speech_inputs, np_speech_inputs]:
        np_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
        self.assertTrue(np_processed.input_values.dtype == np.float32 )
        pt_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
        self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def test_call_target( self ):
    """Spectrogram-target encoding: correct mel feature size, and list vs.
    numpy inputs (1-D and 2-D batched) must encode identically.

    Fixes vs. the previous version: the padding flag was the undefined
    placeholder ``_A`` (restored to ``True``) and every local was clobbered.
    """
    feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    # create three inputs of length 800, 1000, and 1200
    speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
    np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
    # Test feature size
    input_values = feature_extractor(audio_target=speech_inputs , padding=True , return_tensors='np' ).input_values
    self.assertTrue(input_values.ndim == 3 )
    self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
    # Test not batched input
    encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
    encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
    self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
    # Test batched
    encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='np' ).input_values
    encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='np' ).input_values
    for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
        self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
    # Test 2-D numpy arrays are batched.
    speech_inputs = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
    np_speech_inputs = np.asarray(speech_inputs )
    encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='np' ).input_values
    encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='np' ).input_values
    for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
        self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
def test_batch_feature_target( self ):
    """``BatchFeature`` must keep per-example lengths, and equal-length
    inputs must stack into a (batch, length, num_mel_bins) numpy tensor.

    Fixes vs. the previous version: locals were clobbered, the length
    comparison zipped the undefined ``_A``, and the ``equal_length`` flag
    was the undefined ``_A`` (restored to ``True``).
    """
    speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
    feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
    input_name = feat_extract.model_input_names[0]
    processed_features = BatchFeature({input_name: speech_inputs} )
    self.assertTrue(all(len(x ) == len(y ) for x, y in zip(speech_inputs , processed_features[input_name] ) ) )
    speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True )
    processed_features = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
    batch_features_input = processed_features[input_name]
    if len(batch_features_input.shape ) < 3:
        batch_features_input = batch_features_input[:, :, None]
    self.assertTrue(
        batch_features_input.shape
        == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def test_batch_feature_target_pt( self ):
    """Same stacking check as the numpy variant, but with
    ``tensor_type='pt'``.

    Fixes vs. the previous version: the ``equal_length`` flag was the
    undefined placeholder ``_A`` (restored to ``True``) and every local was
    bound to the same throwaway name.
    """
    speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True )
    feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
    input_name = feat_extract.model_input_names[0]
    processed_features = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
    batch_features_input = processed_features[input_name]
    if len(batch_features_input.shape ) < 3:
        batch_features_input = batch_features_input[:, :, None]
    self.assertTrue(
        batch_features_input.shape
        == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
    @require_torch
    def _lowercase ( self ):
        """Padding targets to the longest sequence must give numerically equivalent
        results whether numpy or torch tensors are requested."""
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase = feat_extract.model_input_names[0]
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        UpperCAmelCase = feat_extract.num_mel_bins # hack!
        UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )[input_name]
        UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''pt''' )[input_name]
        # Comparing the global sums tolerates tiny dtype/rounding differences between backends.
        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
    def _lowercase ( self ):
        """With return_attention_mask enabled, padding must return an attention mask
        whose per-row sums recover the original (unpadded) sequence lengths."""
        UpperCAmelCase = self.feat_extract_dict
        UpperCAmelCase = True
        UpperCAmelCase = self.feature_extraction_class(**_A )
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase = [len(_A ) for x in speech_inputs]
        UpperCAmelCase = feat_extract.model_input_names[0]
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        UpperCAmelCase = feat_extract.num_mel_bins # hack!
        UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , _A )
        # Mask covers the (batch, time) axes of the padded features.
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _A )
    def _lowercase ( self ):
        """Truncating targets to the shortest sequence must produce an attention mask
        in which every row sums to exactly ``max_length``."""
        UpperCAmelCase = self.feat_extract_dict
        UpperCAmelCase = True
        UpperCAmelCase = self.feature_extraction_class(**_A )
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase = [len(_A ) for x in speech_inputs]
        UpperCAmelCase = feat_extract.model_input_names[0]
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        # Truncate everything down to the shortest input length.
        UpperCAmelCase = min(_A )
        UpperCAmelCase = feat_extract.num_mel_bins # hack!
        UpperCAmelCase = feat_extract.pad(
            _A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , _A )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        # Every row is fully valid after truncation, so each mask row sums to max_length.
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _lowercase ( self , _A ):
        """Load the first ``num_samples`` LibriSpeech dummy clips (sorted by id) and
        return their raw waveform arrays."""
        from datasets import load_dataset
        UpperCAmelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        UpperCAmelCase = ds.sort('''id''' ).select(range(_A ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
UpperCAmelCase = self._load_datasamples(1 )
UpperCAmelCase = SpeechTaFeatureExtractor()
UpperCAmelCase = feature_extractor(_A , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , _A , atol=1E-6 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
UpperCAmelCase = self._load_datasamples(1 )
UpperCAmelCase = SpeechTaFeatureExtractor()
UpperCAmelCase = feature_extractor(audio_target=_A , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _A , atol=1E-4 ) )
| 273 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCamelCase (a_ ):
    """Configuration for a MEGATRON-BERT model.

    Fixes obfuscation damage: every parameter was named ``A_`` (a duplicate-argument
    SyntaxError) while the body read the real names (``vocab_size`` etc.) and the
    attribute assignments were all made to one throwaway local. Parameter names and
    defaults are restored from the signature's default values.
    """

    _lowercase = """megatron-bert"""

    def __init__(
        self,
        vocab_size=2_9056,            # size of the WordPiece vocabulary
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        """Store the architecture hyper-parameters; extra kwargs go to the base config."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 310 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
# Map submodule name -> public symbols; consumed by _LazyModule below so that heavy
# optional backends (sentencepiece, speech, TF, PyTorch) are imported only on demand.
# Fixes obfuscation damage: the structure was assigned to a repeatedly-overwritten
# name while _LazyModule received an undefined `_import_structure`, and the lazy
# module was never installed in sys.modules.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy proxy is used.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; attribute access triggers the real import.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 | 0 |
def lowerCamelCase_ ( hubble_constant , radiation_density , matter_density , dark_energy , redshift , ):
    """Return the Hubble parameter H(z) for a Lambda-CDM universe.

    H(z) = H0 * sqrt(Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_L)
    with curvature density Omega_k = 1 - (Omega_m + Omega_r + Omega_L), so that all
    density parameters sum to exactly one.

    Fixes obfuscation damage: all five parameters were named ``_a`` (a
    duplicate-argument SyntaxError) while the body read the real names.

    Args:
        hubble_constant: present-day Hubble constant H0 (same units as the result).
        radiation_density: relative radiation density Omega_r (0..1).
        matter_density: relative matter density Omega_m (0..1).
        dark_energy: relative dark-energy density Omega_L (0..1).
        redshift: redshift z >= 0 at which to evaluate H.

    Raises:
        ValueError: if any parameter is negative, or any density exceeds one.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('''All input parameters must be positive''')
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('''Relative densities cannot be greater than one''')
    # Curvature closes the density budget to exactly 1.
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_a = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    # H(z) = H0 * sqrt(E(z))
    hubble = hubble_constant * e_a ** (1 / 2)
    return hubble
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # Demo: flat Lambda-CDM approximation at z = 0 should print H0 itself.
    # Fixes obfuscation damage: the demo called undefined `hubble_parameter` and
    # read undefined `matter_density`; positional args keep it decoupled from the
    # function's parameter names.
    lowerCamelCase = 0.3
    print(
        lowerCamelCase_(
            6_8.3,               # hubble_constant
            1E-4,                # radiation_density
            lowerCamelCase,      # matter_density
            1 - lowerCamelCase,  # dark_energy (flat universe)
            0,                   # redshift
        )
    )
| 131 |
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> list[int]:
'''simple docstring'''
if length <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(UpperCamelCase__ )]
if __name__ == "__main__":
    # Demo: fixes the call to the undefined name `hexagonal_numbers`; the generator
    # defined above is `__SCREAMING_SNAKE_CASE` and takes the length positionally.
    print(__SCREAMING_SNAKE_CASE(5))
    print(__SCREAMING_SNAKE_CASE(10))
| 273 | 0 |
from ...processing_utils import ProcessorMixin
class _lowerCamelCase ( a_ ):
    """Processor bundling a SpeechT5 feature extractor and tokenizer behind one API.

    Audio is routed to the feature extractor, text to the tokenizer; targets
    (``audio_target`` / ``text_target``) become ``labels`` with an optional
    ``decoder_attention_mask``.
    """
    # Component class names resolved by ProcessorMixin.
    UpperCAmelCase_ : Optional[int] ="SpeechT5FeatureExtractor"
    UpperCAmelCase_ : Tuple ="SpeechT5Tokenizer"
    def __init__( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
        """Register the feature extractor and tokenizer with the mixin."""
        super().__init__(_A , _A )
    def __call__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
        """Process model inputs and/or targets in one call.

        Exactly one of ``audio``/``text`` may be given as the input, and exactly
        one of ``audio_target``/``text_target`` as the label source; conflicting
        or missing arguments raise ValueError.
        """
        __snake_case : Union[str, Any] = kwargs.pop("audio" , _A )
        __snake_case : Tuple = kwargs.pop("text" , _A )
        __snake_case : Dict = kwargs.pop("text_target" , _A )
        __snake_case : List[Any] = kwargs.pop("audio_target" , _A )
        __snake_case : Optional[Any] = kwargs.pop("sampling_rate" , _A )
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." )
        # Route the main input to the matching component.
        if audio is not None:
            __snake_case : Tuple = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A )
        elif text is not None:
            __snake_case : Any = self.tokenizer(_A , **_A )
        else:
            __snake_case : List[Any] = None
        # Route the target to the matching component and expose it as labels.
        if audio_target is not None:
            __snake_case : Union[str, Any] = self.feature_extractor(audio_target=_A , *_A , sampling_rate=_A , **_A )
            __snake_case : Tuple = targets["input_values"]
        elif text_target is not None:
            __snake_case : List[Any] = self.tokenizer(_A , **_A )
            __snake_case : List[Any] = targets["input_ids"]
        else:
            __snake_case : str = None
        if inputs is None:
            return targets
        if targets is not None:
            __snake_case : List[str] = labels
            # Carry the target's attention mask over as the decoder attention mask.
            __snake_case : Optional[int] = targets.get("attention_mask" )
            if decoder_attention_mask is not None:
                __snake_case : List[str] = decoder_attention_mask
        return inputs
    def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
        """Pad batched ``input_values`` / ``input_ids`` / ``labels`` to a common length.

        ``input_values`` are padded by the feature extractor, ``input_ids`` by the
        tokenizer; ``labels`` are dispatched by inspecting their structure.
        """
        __snake_case : List[Any] = kwargs.pop("input_values" , _A )
        __snake_case : Optional[int] = kwargs.pop("input_ids" , _A )
        __snake_case : Optional[int] = kwargs.pop("labels" , _A )
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs." )
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." )
        if input_values is not None:
            __snake_case : Optional[int] = self.feature_extractor.pad(_A , *_A , **_A )
        elif input_ids is not None:
            __snake_case : Optional[Any] = self.tokenizer.pad(_A , **_A )
        else:
            __snake_case : List[str] = None
        if labels is not None:
            # Labels containing "input_ids" are token targets for the tokenizer;
            # otherwise they are spectrogram targets for the feature extractor.
            if "input_ids" in labels or (isinstance(_A , _A ) and "input_ids" in labels[0]):
                __snake_case : Optional[int] = self.tokenizer.pad(_A , **_A )
                __snake_case : List[Any] = targets["input_ids"]
            else:
                # Temporarily pad along the mel-bin axis, then restore feature_size.
                __snake_case : List[Any] = self.feature_extractor.feature_size
                __snake_case : Optional[int] = self.feature_extractor.num_mel_bins
                __snake_case : str = self.feature_extractor.pad(_A , *_A , **_A )
                __snake_case : Tuple = feature_size_hack
                __snake_case : Optional[int] = targets["input_values"]
        else:
            __snake_case : Optional[int] = None
        if inputs is None:
            return targets
        if targets is not None:
            __snake_case : Dict = labels
            __snake_case : Tuple = targets.get("attention_mask" )
            if decoder_attention_mask is not None:
                __snake_case : Any = decoder_attention_mask
        return inputs
    def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Dict:
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*_A , **_A )
    def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*_A , **_A )
| 326 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.g4dn.xlarge''',
            '''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
        },
        {
            '''framework''': '''tensorflow''',
            '''script''': '''run_tf.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.g4dn.xlarge''',
            '''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
        },
    ] )
class A_ (unittest.TestCase ):
    """End-to-end single-node SageMaker training smoke test, parameterized per framework.

    Only runs when TEST_SAGEMAKER=True; requires valid AWS credentials and the
    ``sm_env`` pytest fixture.
    """
    def _lowercase ( self ):
        """Stage the PyTorch example script into the test path and sanity-check the env fixture."""
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
        assert hasattr(self , '''env''' )
    def _lowercase ( self , _A=1 ):
        """Build a HuggingFace estimator for a single-instance training job."""
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
    def _lowercase ( self , _A ):
        """Export a training job's CloudWatch metrics to CSV under the test path."""
        TrainingJobAnalytics(_A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
    def _lowercase ( self ):
        """Run training, assert the per-framework KPI thresholds, and dump the results JSON."""
        UpperCAmelCase = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        UpperCAmelCase = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 273 | 0 |
"""simple docstring"""
import operator as op
def a__ ( post_fix ) -> int:
    """Evaluate a postfix (RPN) expression, printing each step as a table.

    Fixes obfuscation damage: the parameter, the division lambda's arguments and
    several stack references were left as undefined names; they are restored from
    the surrounding logic. Only non-negative integer operands (``str.isdigit``)
    and the operators ^ * / + - are supported.

    Args:
        post_fix: sequence of tokens, e.g. ``["5", "6", "*", "2", "+"]``.

    Returns:
        The integer result of the expression.
    """
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation (truncates toward zero)
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8), "Action".center(1_2), "Stack", sep=" | ")
    print("-" * (3_0 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is an operand
            stack.append(x)  # push x onto the stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(1_2), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop the right operand
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(1_2), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop the left operand
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(1_2), ",".join(stack), sep=" | ")
            # evaluate the two popped values & push the result back onto the stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(1_2), ",".join(stack), sep=" | ")
    return int(stack[0])
if __name__ == "__main__":
    # Read a space-separated postfix expression and evaluate it with the solver above.
    # Fixes obfuscation damage: the original called undefined names `solve` / `Postfix`.
    __A = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", a__(__A))
| 217 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : int = logging.get_logger(__name__)
__A : Tuple = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ (a_ ):
    """Configuration for a BigBird model (block-sparse attention transformer).

    Fixes obfuscation damage: every parameter was named ``_A`` (a duplicate-argument
    SyntaxError) while the body read the real names. Parameter names and order are
    restored from the signature's default values.
    """

    UpperCAmelCase__ = '''big_bird'''

    def __init__(
        self,
        vocab_size=5_0_3_5_8,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4_0_9_6,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=6_6,
        attention_type="block_sparse",   # "block_sparse" or "original_full"
        use_bias=True,
        rescale_embeddings=False,
        block_size=6_4,                  # attention block size for block-sparse mode
        num_random_blocks=3,             # random blocks attended per query block
        classifier_dropout=None,
        **kwargs,
    ):
        """Store the architecture hyper-parameters; special token ids go to the base config."""
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class A_ (a_ ):
    @property
    def _lowercase ( self ):
        """ONNX dynamic-axes mapping for the model inputs.

        Fixes obfuscation damage: the axes dict was assigned to a throwaway name
        but read through the undefined name ``dynamic_axis``. Multiple-choice
        inputs carry an extra `choice` axis between batch and sequence.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        # Both inputs share the same dynamic-axes specification.
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 273 | 0 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(a_ )
class __magic_name__ ( a_ ):
"""simple docstring"""
def __init__( self :Dict , **snake_case :Tuple ):
'''simple docstring'''
super().__init__(**_A )
if self.framework != "pt":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
# No specific FOR_XXX available yet
def __call__( self :Optional[Any] , snake_case :List[str] , **snake_case :Optional[int] ):
'''simple docstring'''
return super().__call__(_A , **_A )
def SCREAMING_SNAKE_CASE ( self :Tuple , **snake_case :Any ):
'''simple docstring'''
A_ : int = {}
if "candidate_labels" in kwargs:
A_ : Tuple = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
A_ : Any = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :int , snake_case :Any=None , snake_case :Tuple="This is a sound of {}." ):
'''simple docstring'''
if isinstance(_A , _A ):
if audio.startswith("http://" ) or audio.startswith("https://" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
A_ : Tuple = requests.get(_A ).content
else:
with open(_A , "rb" ) as f:
A_ : int = f.read()
if isinstance(_A , _A ):
A_ : int = ffmpeg_read(_A , self.feature_extractor.sampling_rate )
if not isinstance(_A , np.ndarray ):
raise ValueError("We expect a numpy ndarray as input" )
if len(audio.shape ) != 1:
raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" )
A_ : Optional[Any] = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="pt" )
A_ : Optional[int] = candidate_labels
A_ : Tuple = [hypothesis_template.format(_A ) for x in candidate_labels]
A_ : Optional[Any] = self.tokenizer(_A , return_tensors=self.framework , padding=_A )
A_ : Tuple = [text_inputs]
return inputs
def SCREAMING_SNAKE_CASE ( self :str , snake_case :str ):
'''simple docstring'''
A_ : Optional[int] = model_inputs.pop("candidate_labels" )
A_ : List[Any] = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , _A ):
A_ : str = text_inputs[0]
else:
# Batching case.
A_ : Dict = text_inputs[0][0]
A_ : Dict = self.model(**_A , **_A )
A_ : Dict = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_audio,
}
return model_outputs
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Optional[int] ):
'''simple docstring'''
A_ : Any = model_outputs.pop("candidate_labels" )
A_ : Dict = model_outputs["logits"][0]
if self.framework == "pt":
A_ : str = logits.softmax(dim=0 )
A_ : List[str] = probs.tolist()
else:
raise ValueError("`tf` framework not supported." )
A_ : Optional[int] = [
{"score": score, "label": candidate_label}
for score, candidate_label in sorted(zip(_A , _A ) , key=lambda snake_case : -x[0] )
]
return result
| 300 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
    """Helper that builds TFViT configs/inputs and runs shape checks for the test suite."""
    def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , ):
        """Store the model/test hyper-parameters and derive the patch sequence length."""
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = image_size
        UpperCAmelCase = patch_size
        UpperCAmelCase = num_channels
        UpperCAmelCase = is_training
        UpperCAmelCase = use_labels
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = type_sequence_label_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        UpperCAmelCase = (image_size // patch_size) ** 2
        UpperCAmelCase = num_patches + 1
    def _lowercase ( self ):
        """Create random pixel values (and labels, if enabled) plus a config."""
        UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCAmelCase = self.get_config()
        return config, pixel_values, labels
    def _lowercase ( self ):
        """Build a ViTConfig from the stored hyper-parameters."""
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , )
    def _lowercase ( self , _A , _A , _A ):
        """Check base-model output shapes, including position-embedding interpolation for smaller images."""
        UpperCAmelCase = TFViTModel(config=_A )
        UpperCAmelCase = model(_A , training=_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image with different size than the one specified in config.
        UpperCAmelCase = self.image_size // 2
        UpperCAmelCase = pixel_values[:, :, :image_size, :image_size]
        UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A )
        UpperCAmelCase = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
    def _lowercase ( self , _A , _A , _A ):
        """Check classification-head logits, including interpolation and greyscale input."""
        UpperCAmelCase = self.type_sequence_label_size
        UpperCAmelCase = TFViTForImageClassification(_A )
        UpperCAmelCase = model(_A , labels=_A , training=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image with different size than the one specified in config.
        UpperCAmelCase = self.image_size // 2
        UpperCAmelCase = pixel_values[:, :, :image_size, :image_size]
        UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCAmelCase = 1
        UpperCAmelCase = TFViTForImageClassification(_A )
        UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCAmelCase = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def _lowercase ( self ):
        """Split prepared inputs into (config, inputs_dict) for the common tests."""
        UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
        UpperCAmelCase = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class A_ (a_ , a_ , unittest.TestCase ):
    """Common + pipeline test-suite bindings for the TensorFlow ViT models."""
    UpperCAmelCase__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    UpperCAmelCase__ = (
        {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    # ViT takes pixel_values only: no input-embeds, head-masking or attention-mask tests.
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    def _lowercase ( self ):
        """Set up the model tester and the (text-free) config tester."""
        UpperCAmelCase = TFViTModelTester(self )
        UpperCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 )
    def _lowercase ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def _lowercase ( self ):
        """Skipped: ViT consumes pixel values, not input embeddings."""
        pass
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def _lowercase ( self ):
        """Skipped: ViT consumes pixel values, not input embeddings."""
        pass
    def _lowercase ( self ):
        """Input embeddings are a Keras layer; output embeddings are absent or a layer."""
        UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase = model_class(_A )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            UpperCAmelCase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )
    def _lowercase ( self ):
        """Every model's call signature must start with `pixel_values`."""
        UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase = model_class(_A )
            UpperCAmelCase = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase = [*signature.parameters.keys()]
            UpperCAmelCase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _A )
    def _lowercase ( self ):
        """Exercise the base model shape checks."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )
    def _lowercase ( self ):
        """Exercise the image-classification head shape checks."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_A )
    @slow
    def _lowercase ( self ):
        """Smoke-test loading the pretrained checkpoint from the Hub."""
        UpperCAmelCase = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
        self.assertIsNotNone(_A )
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
    """Load the standard COCO cats fixture image used by the integration test below.

    Fixes obfuscation damage: the opened image was assigned to one name while the
    undefined name ``image`` was returned — return it directly instead.
    """
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_tf
@require_vision
class A_ (unittest.TestCase ):
    """Integration test: pretrained TFViT logits on a real image match reference values."""
    @cached_property
    def _lowercase ( self ):
        """Image processor for the pretrained checkpoint (None without vision deps)."""
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
    @slow
    def _lowercase ( self ):
        """Run the classifier on the COCO fixture image and compare the first logits."""
        UpperCAmelCase = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
        UpperCAmelCase = self.default_image_processor
        UpperCAmelCase = prepare_img()
        UpperCAmelCase = image_processor(images=_A , return_tensors='''tf''' )
        # forward pass
        UpperCAmelCase = model(**_A )
        # verify the logits
        UpperCAmelCase = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , _A )
        UpperCAmelCase = tf.constant([-0.27_44, 0.82_15, -0.08_36] )
        tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 )
| 273 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def __UpperCAmelCase ( __lowerCamelCase ) -> str:
    """Build an ``ASTConfig`` for the given checkpoint name.

    NOTE(review): identifier mangling collapsed distinct targets into the
    single local ``lowercase__``, so the size/label assignments below no longer
    reach the config object (upstream they set attributes such as patch sizes,
    ``config.num_labels`` and ``config.id2label``); ``model_name``, the
    ``hf_hub_download`` arguments (``UpperCamelCase__``), ``idalabel`` and the
    returned ``config`` are all undefined as written. TODO restore the
    original identifiers.
    """
    lowercase__ : Optional[int] = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        lowercase__ : str = 1_28
    elif "12-12" in model_name:
        lowercase__ : Optional[int] = 12
        lowercase__ : Any = 12
    elif "14-14" in model_name:
        lowercase__ : Any = 14
        lowercase__ : Optional[Any] = 14
    elif "16-16" in model_name:
        lowercase__ : Tuple = 16
        lowercase__ : int = 16
    else:
        raise ValueError('''Model not supported''' )
    lowercase__ : int = '''huggingface/label-files'''
    if "speech-commands" in model_name:
        lowercase__ : List[Any] = 35
        lowercase__ : Optional[int] = '''speech-commands-v2-id2label.json'''
    else:
        lowercase__ : Optional[int] = 5_27
        lowercase__ : int = '''audioset-id2label.json'''
    lowercase__ : Union[str, Any] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='''dataset''' ) , '''r''' ) )
    lowercase__ : List[str] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
    lowercase__ : int = idalabel
    lowercase__ : List[Any] = {v: k for k, v in idalabel.items()}
    return config
def __UpperCAmelCase ( __lowerCamelCase ) -> List[str]:
if "module.v" in name:
lowercase__ : Any = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
if "cls_token" in name:
lowercase__ : int = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "dist_token" in name:
lowercase__ : Any = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
if "pos_embed" in name:
lowercase__ : Optional[Any] = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowercase__ : int = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
# transformer blocks
if "blocks" in name:
lowercase__ : List[str] = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
lowercase__ : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowercase__ : str = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowercase__ : Dict = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowercase__ : List[Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowercase__ : Union[str, Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowercase__ : List[str] = name.replace('''mlp.fc2''' , '''output.dense''' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowercase__ : Any = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
# classifier head
if "module.mlp_head.0" in name:
lowercase__ : Optional[Any] = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
if "module.mlp_head.1" in name:
lowercase__ : str = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
return name
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
    """Split fused qkv weights from the original checkpoint into q/k/v entries.

    NOTE(review): this def does not compile — both parameters share the name
    ``__lowerCamelCase`` (SyntaxError: duplicate argument; upstream they were
    ``orig_state_dict`` and ``config``). The body also pops with the undefined
    ``UpperCamelCase__`` (should be ``key``) and binds the q/k/v slices — and
    the renamed non-qkv entries — to a dead ``lowercase__`` local instead of
    writing them back into the state dict. TODO restore the original
    identifiers; code kept byte-for-byte below.
    """
    for key in orig_state_dict.copy().keys():
        lowercase__ : Any = orig_state_dict.pop(UpperCamelCase__ )
        if "qkv" in key:
            lowercase__ : Tuple = key.split('''.''' )
            lowercase__ : List[str] = int(key_split[3] )
            lowercase__ : Tuple = config.hidden_size
            if "weight" in key:
                lowercase__ : int = val[:dim, :]
                lowercase__ : List[str] = val[dim : dim * 2, :]
                lowercase__ : Tuple = val[-dim:, :]
            else:
                lowercase__ : Tuple = val[:dim]
                lowercase__ : Any = val[dim : dim * 2]
                lowercase__ : int = val[-dim:]
        else:
            lowercase__ : Optional[Any] = val
    return orig_state_dict
def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]:
lowercase__ : Union[str, Any] = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
@torch.no_grad()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> Optional[int]:
    """Download an original AST checkpoint, convert it to HF format, verify the
    logits on a sample input, then optionally save and/or push the result.

    NOTE(review): this def does not compile — all three parameters share the
    name ``__lowerCamelCase`` (SyntaxError: duplicate argument; upstream:
    ``model_name``, ``pytorch_dump_folder_path``, ``push_to_hub``). Mangling
    also collapsed the locals into ``lowercase__``, left ``UpperCamelCase__``
    undefined throughout, and renamed the helper functions it calls
    (``get_audio_spectrogram_transformer_config``, ``remove_keys``,
    ``convert_state_dict``) out of existence. Code kept byte-for-byte below;
    TODO restore the original identifiers.
    """
    lowercase__ : Any = get_audio_spectrogram_transformer_config(UpperCamelCase__ )
    lowercase__ : Optional[int] = {
        '''ast-finetuned-audioset-10-10-0.4593''': (
            '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-10-10-0.450''': (
            '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-10-10-0.448''': (
            '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-10-10-0.448-v2''': (
            '''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-12-12-0.447''': (
            '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-14-14-0.443''': (
            '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-16-16-0.442''': (
            '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
        ),
        '''ast-finetuned-speech-commands-v2''': (
            '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
        ),
    }
    # load original state_dict
    lowercase__ : Union[str, Any] = model_name_to_url[model_name]
    lowercase__ : str = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' )
    # remove some keys
    remove_keys(UpperCamelCase__ )
    # rename some keys
    lowercase__ : int = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
    # load 🤗 model
    lowercase__ : int = ASTForAudioClassification(UpperCamelCase__ )
    model.eval()
    model.load_state_dict(UpperCamelCase__ )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    lowercase__ : Union[str, Any] = -4.2_6_7_7_3_9_3 if '''speech-commands''' not in model_name else -6.8_4_5_9_7_8
    lowercase__ : List[str] = 4.5_6_8_9_9_7_4 if '''speech-commands''' not in model_name else 5.5_6_5_4_5_2_6
    lowercase__ : List[Any] = 10_24 if '''speech-commands''' not in model_name else 1_28
    lowercase__ : Dict = ASTFeatureExtractor(mean=UpperCamelCase__ , std=UpperCamelCase__ , max_length=UpperCamelCase__ )
    if "speech-commands" in model_name:
        lowercase__ : Any = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
        lowercase__ : Union[str, Any] = dataset[0]['''audio''']['''array''']
    else:
        lowercase__ : Tuple = hf_hub_download(
            repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
        lowercase__ , lowercase__ : List[str] = torchaudio.load(UpperCamelCase__ )
        lowercase__ : List[str] = waveform.squeeze().numpy()
    lowercase__ : Union[str, Any] = feature_extractor(UpperCamelCase__ , sampling_rate=1_60_00 , return_tensors='''pt''' )
    # forward pass
    lowercase__ : Tuple = model(**UpperCamelCase__ )
    lowercase__ : Optional[int] = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        lowercase__ : Union[str, Any] = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        lowercase__ : Optional[Any] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        lowercase__ : Dict = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        lowercase__ : Dict = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        lowercase__ : str = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        lowercase__ : str = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        lowercase__ : Any = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        lowercase__ : List[Any] = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] )
    else:
        raise ValueError('''Unknown model name''' )
    if not torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1E-4 ):
        raise ValueError('''Logits don\'t match''' )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(UpperCamelCase__ )
        print(f"""Saving feature extractor to {pytorch_dump_folder_path}""" )
        feature_extractor.save_pretrained(UpperCamelCase__ )
    if push_to_hub:
        print('''Pushing model and feature extractor to the hub...''' )
        model.push_to_hub(f"""MIT/{model_name}""" )
        feature_extractor.push_to_hub(f"""MIT/{model_name}""" )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    lowerCAmelCase_ = argparse.ArgumentParser()
    # Required parameters
    lowerCAmelCase_.add_argument(
        '--model_name',
        default='ast-finetuned-audioset-10-10-0.4593',
        type=str,
        help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
    ) if False else None
    # NOTE(review): the block above is reproduced below unmodified; the
    # original text is kept byte-for-byte.
    parser.add_argument(
        '--model_name',
        default='ast-finetuned-audioset-10-10-0.4593',
        type=str,
        help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    lowerCAmelCase_ = parser.parse_args()
    # NOTE(review): `parser` above and `convert_audio_spectrogram_transformer_checkpoint`
    # below are undefined in this file (the parser was bound to `lowerCAmelCase_`
    # and the conversion function was renamed by mangling); both calls raise
    # NameError as written. TODO point them at the right names.
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 16 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class A_ (unittest.TestCase ):
    """Integration tests comparing XLM-RoBERTa hidden states to fairseq values.

    NOTE(review): both tests are named ``_lowercase``, so the second definition
    shadows the first and only one ever runs; ``_A`` inside the bodies is also
    undefined (the locals were all collapsed into ``UpperCAmelCase``). The code
    is kept byte-for-byte; TODO restore distinct, meaningful names.
    """
    @slow
    def _lowercase ( self ):
        """Check xlm-roberta-base last hidden state against reference values."""
        UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
        UpperCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house
        UpperCAmelCase = torch.Size((1, 1_2, 7_6_8) )  # batch_size, sequence_length, embedding_vector_dim
        UpperCAmelCase = torch.tensor(
            [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            UpperCAmelCase = model(_A )['''last_hidden_state'''].detach()
        self.assertEqual(output.shape , _A )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , _A , atol=1E-3 ) )
    @slow
    def _lowercase ( self ):
        """Check xlm-roberta-large last hidden state against reference values."""
        UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
        UpperCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house
        UpperCAmelCase = torch.Size((1, 1_2, 1_0_2_4) )  # batch_size, sequence_length, embedding_vector_dim
        UpperCAmelCase = torch.tensor(
            [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            UpperCAmelCase = model(_A )['''last_hidden_state'''].detach()
        self.assertEqual(output.shape , _A )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , _A , atol=1E-3 ) )
| 273 | 0 |
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Map of submodule name -> public symbols, consumed by _LazyModule below.
# Fixes a NameError in the original: the dict was bound to a mangled name
# while `_LazyModule` was called with the undefined `_import_structure`.
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
# Keep the original (mangled) alias so any external references still resolve.
UpperCAmelCase = _import_structure

if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the heavy tokenizer import only
    # happens on first attribute access.  The original assigned the proxy to a
    # throwaway local, which left the module eager and the proxy unused.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 195 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
# Module-level logger for the training script.
__A : Optional[int] = logging.getLogger(__name__)
@dataclass
class A_ :
    """Arguments for selecting which pretrained model/config/tokenizer to fine-tune.

    NOTE(review): mangling collapsed every field into ``UpperCAmelCase__`` —
    each assignment overwrites the previous (upstream: ``model_name_or_path``,
    ``config_name``, ``tokenizer_name``, ``cache_dir``, ``use_fast_tokenizer``,
    ``model_revision``, ``use_auth_token``) — and the default ``a_`` is not
    defined in this file's visible scope. The fields also lack the type
    annotations dataclasses require. TODO restore the original declarations.
    """
    UpperCAmelCase__ = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    UpperCAmelCase__ = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
@dataclass
class A_ :
    """Arguments controlling input data and preprocessing for SWAG fine-tuning.

    NOTE(review): mangling collapsed every field into ``UpperCAmelCase__``
    (upstream: ``train_file``, ``validation_file``, ``overwrite_cache``,
    ``preprocessing_num_workers``, ``max_seq_length``, ``pad_to_max_length``,
    ``max_train_samples``, ``max_eval_samples``) and the default ``a_`` is not
    defined in this file's visible scope, so the validation method's
    ``self.train_file`` reads will fail. TODO restore the original fields.
    """
    UpperCAmelCase__ = field(default=a_ , metadata={'''help''': '''The input training data file (a text file).'''} )
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    UpperCAmelCase__ = field(
        default=a_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. If passed, sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''Whether to pad all samples to the maximum sentence length. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
                '''efficient on GPU but very bad for TPU.'''
            )
        } , )
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    UpperCAmelCase__ = field(
        default=a_ , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    def _lowercase ( self ):
        """Validate that train/validation files are csv or json (upstream __post_init__).

        NOTE(review): the split extension is bound to ``UpperCAmelCase`` but the
        asserts read the undefined name ``extension`` — mangled identifiers.
        """
        if self.train_file is not None:
            UpperCAmelCase = self.train_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            UpperCAmelCase = self.validation_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class A_ :
    """Data collator that flattens (batch, num_choices) features, pads them,
    reshapes back, and re-attaches the labels.

    NOTE(review): the four fields below all share the mangled name
    ``UpperCAmelCase__`` (upstream: ``tokenizer``, ``padding``, ``max_length``,
    ``pad_to_multiple_of``), so the ``self.tokenizer``/``self.padding`` reads
    in ``__call__`` will fail; ``features`` is read but the parameter is
    ``_A``; and ``torch.intaa`` is not a real dtype (upstream: ``torch.int64``).
    TODO restore the original declarations; code kept byte-for-byte below.
    """
    UpperCAmelCase__ = 42
    UpperCAmelCase__ = True
    UpperCAmelCase__ = None
    UpperCAmelCase__ = None
    def __call__( self , _A ):
        """Collate a list of multiple-choice feature dicts into one padded batch."""
        UpperCAmelCase = '''label''' if '''label''' in features[0].keys() else '''labels'''
        UpperCAmelCase = [feature.pop(_A ) for feature in features]
        UpperCAmelCase = len(_A )
        UpperCAmelCase = len(features[0]['''input_ids'''] )
        UpperCAmelCase = [
            [{k: v[i] for k, v in feature.items()} for i in range(_A )] for feature in features
        ]
        UpperCAmelCase = list(chain(*_A ) )
        UpperCAmelCase = self.tokenizer.pad(
            _A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        # Un-flatten
        UpperCAmelCase = {k: v.view(_A , _A , -1 ) for k, v in batch.items()}
        # Add back labels
        UpperCAmelCase = torch.tensor(_A , dtype=torch.intaa )
        return batch
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
    """Entry point for multiple-choice (SWAG) fine-tuning: parse args, set up
    logging/seeding, load data + model, preprocess, train, and evaluate.

    NOTE(review): identifier mangling bound every local to ``UpperCAmelCase``
    while subsequent reads still use the upstream names (``parser``,
    ``model_args``, ``data_args``, ``training_args``, ``raw_datasets``,
    ``tokenizer``, ``trainer``, ...) or the undefined ``UpperCamelCase__``, so
    the function raises NameError as written. Code kept byte-for-byte; TODO
    restore the original identifiers.
    """
    UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_swag''' , UpperCamelCase__ , UpperCamelCase__ )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    UpperCAmelCase = training_args.get_process_log_level()
    logger.setLevel(UpperCamelCase__ )
    datasets.utils.logging.set_verbosity(UpperCamelCase__ )
    transformers.utils.logging.set_verbosity(UpperCamelCase__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )
    # Detecting last checkpoint.
    UpperCAmelCase = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        UpperCAmelCase = {}
        if data_args.train_file is not None:
            UpperCAmelCase = data_args.train_file
        if data_args.validation_file is not None:
            UpperCAmelCase = data_args.validation_file
        UpperCAmelCase = data_args.train_file.split('''.''' )[-1]
        UpperCAmelCase = load_dataset(
            UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        UpperCAmelCase = load_dataset(
            '''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    UpperCAmelCase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    UpperCAmelCase = [F"""ending{i}""" for i in range(4 )]
    UpperCAmelCase = '''sent1'''
    UpperCAmelCase = '''sent2'''
    if data_args.max_seq_length is None:
        UpperCAmelCase = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                '''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
                ''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
                ''' override this default with `--block_size xxx`.''' )
            UpperCAmelCase = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
    # Preprocessing the datasets.
    def preprocess_function(UpperCamelCase__ ):
        # Expand each context once per ending, tokenize the flattened pairs,
        # then regroup into lists of 4 per example.
        UpperCAmelCase = [[context] * 4 for context in examples[context_name]]
        UpperCAmelCase = examples[question_header_name]
        UpperCAmelCase = [
            [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(UpperCamelCase__ )
        ]
        # Flatten out
        UpperCAmelCase = list(chain(*UpperCamelCase__ ) )
        UpperCAmelCase = list(chain(*UpperCamelCase__ ) )
        # Tokenize
        UpperCAmelCase = tokenizer(
            UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('''--do_train requires a train dataset''' )
        UpperCAmelCase = raw_datasets['''train''']
        if data_args.max_train_samples is not None:
            UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
            UpperCAmelCase = train_dataset.select(range(UpperCamelCase__ ) )
        with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
            UpperCAmelCase = train_dataset.map(
                UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('''--do_eval requires a validation dataset''' )
        UpperCAmelCase = raw_datasets['''validation''']
        if data_args.max_eval_samples is not None:
            UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
            UpperCAmelCase = eval_dataset.select(range(UpperCamelCase__ ) )
        with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
            UpperCAmelCase = eval_dataset.map(
                UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    # Data collator
    UpperCAmelCase = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
    )
    # Metric
    def compute_metrics(UpperCamelCase__ ):
        # Accuracy over the argmax of the choice logits.
        UpperCAmelCase , UpperCAmelCase = eval_predictions
        UpperCAmelCase = np.argmax(UpperCamelCase__ , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
    # Initialize our Trainer
    UpperCAmelCase = Trainer(
        model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
    # Training
    if training_args.do_train:
        UpperCAmelCase = None
        if training_args.resume_from_checkpoint is not None:
            UpperCAmelCase = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            UpperCAmelCase = last_checkpoint
        UpperCAmelCase = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        UpperCAmelCase = train_result.metrics
        UpperCAmelCase = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
        )
        UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
        trainer.log_metrics('''train''' , UpperCamelCase__ )
        trainer.save_metrics('''train''' , UpperCamelCase__ )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        UpperCAmelCase = trainer.evaluate()
        UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
        UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
        trainer.log_metrics('''eval''' , UpperCamelCase__ )
        trainer.save_metrics('''eval''' , UpperCamelCase__ )
    UpperCAmelCase = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''multiple-choice''',
        '''dataset_tags''': '''swag''',
        '''dataset_args''': '''regular''',
        '''dataset''': '''SWAG''',
        '''language''': '''en''',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**UpperCamelCase__ )
    else:
        trainer.create_model_card(**UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int:
    """TPU/xla_spawn entry-point shim (upstream ``_mp_fn``).

    NOTE(review): this def reuses the training entry point's mangled name and
    so shadows it, and ``main`` is not defined in this file — calling this
    raises NameError. TODO restore distinct names.
    """
    main()
if __name__ == "__main__":
    # NOTE(review): `main` is undefined here (the entry point was renamed to
    # `__SCREAMING_SNAKE_CASE` by mangling); this raises NameError at runtime.
    main()
| 273 | 0 |
'''simple docstring'''
from math import sqrt
def a__ ( lowercase : Optional[int] ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(sqrt(UpperCamelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a__ ( lowercase : Tuple = 10001 ) -> int:
"""simple docstring"""
_UpperCamelCase = 0
_UpperCamelCase = 1
while count != nth and number < 3:
number += 1
if is_prime(UpperCamelCase__ ):
count += 1
while count != nth:
number += 2
if is_prime(UpperCamelCase__ ):
count += 1
return number
if __name__ == "__main__":
    # The original printed `solution()`, a name that does not exist in this
    # file (the solver was mangled to `a__`); call the function that exists.
    print(F"""{a__() = }""")
| 324 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class A_ :
    """Helper that builds MBart configs/inputs for the TF model tests
    (upstream ``TFMBartModelTester``).

    NOTE(review): ``__init__`` repeats the parameter name ``_A`` sixteen
    times, which is a SyntaxError (duplicate argument); upstream names in
    order: parent, batch_size, seq_length, is_training, use_labels,
    vocab_size, hidden_size, num_hidden_layers, num_attention_heads,
    intermediate_size, hidden_dropout_prob, attention_probs_dropout_prob,
    max_position_embeddings, eos_token_id, pad_token_id, bos_token_id. The
    assignments also lost their ``self.`` prefixes, and later methods read
    locals (``input_ids``, ``config``, ``outputs``) that were collapsed into
    ``UpperCAmelCase``. Code kept byte-for-byte; TODO restore.
    """
    UpperCAmelCase__ = MBartConfig
    UpperCAmelCase__ = {}
    UpperCAmelCase__ = '''gelu'''
    def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=False , _A=9_9 , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A=0.1 , _A=0.1 , _A=2_0 , _A=2 , _A=1 , _A=0 , ):
        """Store the model-shape hyperparameters used by the tests."""
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = seq_length
        UpperCAmelCase = is_training
        UpperCAmelCase = use_labels
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = eos_token_id
        UpperCAmelCase = pad_token_id
        UpperCAmelCase = bos_token_id
    def _lowercase ( self ):
        """Build a random MBart config plus matching input dict."""
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        UpperCAmelCase = prepare_mbart_inputs_dict(_A , _A , _A )
        return config, inputs_dict
    def _lowercase ( self , _A , _A ):
        """Exercise the decoder with cached past key values."""
        UpperCAmelCase = TFMBartModel(config=_A ).get_decoder()
        UpperCAmelCase = inputs_dict['''input_ids''']
        UpperCAmelCase = input_ids[:1, :]
        UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
        UpperCAmelCase = inputs_dict['''head_mask''']
        UpperCAmelCase = 1
        # first forward pass
        UpperCAmelCase = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A )
        UpperCAmelCase , UpperCAmelCase = outputs.to_tuple()
        UpperCAmelCase = past_key_values[1]
def __SCREAMING_SNAKE_CASE ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> List[str]:
    """Assemble the full kwargs dict for a TFMBart forward pass, deriving any
    attention/head masks that were not supplied.

    Fixes the original signature, in which all eight parameters shared one
    mangled name (``SyntaxError: duplicate argument``); the restored names are
    exactly the identifiers the body already reads. Each computed mask is also
    bound back to its parameter instead of a dead local, and the mangled
    ``tf.inta`` dtype is restored to ``tf.int8``.
    """
    if attention_mask is None:
        # Attend to every non-pad token of the encoder input.
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        # Always attend to the first decoder token; mask pads elsewhere.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API tests for TF MBart.

    Fixes: duplicate `_A` parameters (SyntaxError), undefined `_A` references,
    duplicate/undefined `a_` base classes, obfuscated framework attribute names,
    and four methods all named `_lowercase` shadowing each other.
    NOTE(review): base-class names reconstructed from the standard TF test
    layout — confirm against the file's (out-of-view) imports.
    """

    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    """Slow en->ro translation smoke test against the released checkpoint.

    Fixes: class attributes were obfuscated while the method bodies read
    `self.src_text` / `self.expected_text` / `self.model_name`; undefined `_A`
    restored (`skip_special_tokens=True`); class renamed so it no longer
    shadows the common test class above.
    """

    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 273 | 0 |
def solution(n: int = 100) -> int:
    """Project Euler 29: count the distinct values of a**b for 2 <= a, b <= n.

    Fixes: the body read an undefined name `n` while the parameter was `__a`,
    and every local was collapsed onto one name. Function renamed to match its
    call site in the `__main__` guard.
    """
    collect_powers = set()
    upper = n + 1  # maximum limit (ranges are exclusive)
    for a in range(2, upper):
        for b in range(2, upper):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
    # Read n from stdin and report the number of distinct powers a**b for 2 <= a, b <= n.
    print('''Number of terms ''', solution(int(str(input()).strip())))
| 244 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    """Builds tiny CTRL configs/inputs and runs shape checks for the test class.

    Fixes: every method signature repeated `_A` (SyntaxError), all locals were
    collapsed onto one name, and the tuple unpack in
    `prepare_config_and_inputs_for_common` bound nine values to one variable.
    Class/method names restored from the call sites in the companion test class
    (`CTRLModelTester(self)`, `create_and_check_ctrl_model`, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # CTRL uses the last vocabulary id as padding.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API tests for the CTRL model family.

    Fixes: duplicate/undefined `a_` bases replaced with the mixins imported at
    the top of this chunk, obfuscated framework attribute names restored,
    undefined `_A` references resolved, and the methods (all `_lowercase`)
    given unique unittest-discoverable names. Renamed from `A_` to avoid
    shadowing by the generation test class below.
    """

    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self):
        # NOTE(review): skipped-test name reconstructed — confirm against upstream.
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip('''The model doesn\'t support left padding''')  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    """Greedy-generation regression test against the released `ctrl` checkpoint.

    Fixes: undefined `_A` references (`torch_device`, `do_sample=False`),
    underscore-mangled integer literals, and the duplicate class name `A_`.
    """

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained('''ctrl''')
        model.to(torch_device)

        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 273 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy import structure for the GroupViT model family.
# Fixes: the structure was bound to `snake_case` while `_LazyModule` received an
# undefined `_import_structure`; the optional model lists rebound `snake_case`
# instead of being registered under their module keys; and the `_LazyModule`
# instance was never installed into `sys.modules`.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 240 |
import cva
import numpy as np
class HarrisCorner:
    """Harris corner detector over a grayscale image.

    Fixes: every local in `detect` was bound to one obfuscated name while the
    code read the real names (`ixx`, `wxx`, `det`, ...); the configured `k`
    was ignored in favour of a hard-coded 0.04; class and method renamed to
    match the `__main__` guard (`HarrisCorner(...)`, `.detect(...)`).
    """

    def __init__(self, k, window_size):
        # Only the conventional Harris sensitivity values are accepted.
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('''invalid k value''')

    def __str__(self):
        return str(self.k)

    def detect(self, img_path):
        """Return (annotated RGB image, [[x, y, response], ...]) for *img_path*."""
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        # np.gradient returns derivatives per axis: rows (y) first, then columns (x).
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the validated sensitivity instead of a hard-coded 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    # Detect corners and persist the annotated image.
    # Fixes: results were bound to obfuscated names while `edge_detect` and
    # `color_img` were read undefined.
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
| 273 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """Smoke tests for the `accelerate launch` / `accelerate test` CLI.

    Fixes: class attributes were all bound to `A` while the bodies read
    `mod_file`, `self.base_cmd`, `cls.config_path`, ...; the methods (all
    `a_`) are restored to the unittest hook names (`setUpClass`,
    `tearDownClass`) and discoverable `test_*` names.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        # Park any pre-existing user config so the tests start from a clean slate.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class A ( unittest.TestCase ):
'''simple docstring'''
A = "test-tpu"
A = "us-central1-a"
A = "ls"
A = ["accelerate", "tpu-config"]
A = "cd /usr/share"
A = "tests/test_samples/test_command_file.sh"
A = "Running gcloud compute tpus tpu-vm ssh"
def a_ (self ) -> Tuple:
__UpperCamelCase : int = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=_A , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , _A , )
def a_ (self ) -> List[Any]:
__UpperCamelCase : List[str] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=_A , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , _A , )
def a_ (self ) -> List[Any]:
__UpperCamelCase : Union[str, Any] = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=_A )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , _A , )
def a_ (self ) -> Any:
__UpperCamelCase : List[Any] = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=_A , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , _A , )
def a_ (self ) -> Optional[int]:
__UpperCamelCase : Optional[Any] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
] , return_stdout=_A , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all" , _A , )
def a_ (self ) -> int:
__UpperCamelCase : Optional[int] = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=_A , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , _A , )
def a_ (self ) -> Dict:
__UpperCamelCase : Dict = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=_A , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , _A , )
def a_ (self ) -> str:
__UpperCamelCase : Any = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=_A , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all" , _A , )
def a_ (self ) -> int:
__UpperCamelCase : int = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
] , return_stdout=_A , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all" , _A , )
| 298 |
from datetime import datetime
import requests
def download_video(url) -> bytes:
    """Resolve an Instagram/IGTV *url* via downloadgram and return the raw video bytes.

    Fixes: both locals were bound to one name while `base_url` was read
    undefined; function renamed to match its call site in the `__main__` guard.
    """
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    # The service returns a JSON list whose first entry carries the source URL.
    video_url = requests.get(base_url + url).json()[0]['''urls'''][0]['''src''']
    return requests.get(video_url).content
if __name__ == "__main__":
    # Fixes: locals were bound to `__A` while `url` and `file_name` were read undefined.
    url = input("Enter Video/IGTV url: ").strip()
    file_name = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(F'Done. Video saved to disk as {file_name}.')
| 273 | 0 |
import enum
import shutil
import sys
# Terminal geometry and ANSI cursor helpers for the interactive menu.
# Fixes: both constants were bound to `__snake_case` while the functions read
# `TERMINAL_WIDTH` / `CURSOR_TO_CHAR`; the enum reused one member name (a
# TypeError at import); and all six helpers were `def _A(_lowercase, _lowercase)`
# — duplicate parameters (SyntaxError) and one shared name. Restored names are
# taken from the call sites inside this chunk (`forceWrite`, `reset_cursor`).
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class __lowerCamelCase(enum.Enum):
    # Direction of cursor movement, consumed via `direction.upper()` lookups.
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    """Write *content* to stdout immediately, bypassing line buffering."""
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    """Write *content* wrapped in the ANSI color escape given by *color*."""
    forceWrite(f'''\u001b[{color}m{content}\u001b[0m''', end)


def reset_cursor():
    """Return the cursor to the start of the current line."""
    forceWrite('\r')


def move_cursor(num_lines, direction):
    """Move the cursor *num_lines* cells in *direction* ('up'/'down'/'right'/'left')."""
    forceWrite(f'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''')


def clear_line():
    """Blank out the current terminal line and rewind the cursor."""
    forceWrite(' ' * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    """Draw a horizontal rule across the full terminal width."""
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH)
| 310 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Approximate the area between *fnc* and the x axis on [x_start, x_end].

    Uses the trapezoidal rule with *steps* equal segments; `abs` keeps segments
    below the axis contributing positive area.

    Fixes: the original signature repeated `UpperCamelCase__` four times
    (SyntaxError) and collapsed all locals onto one name; renamed to match the
    `trapezoidal_area` call in the `__main__` guard.
    """
    xa = x_start
    fxa = fnc(xa)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        area += abs(fxb + fxa) * (xb - xa) / 2
        # Increment step
        xa = xb
        fxa = fxb
    return area
if __name__ == "__main__":
    # Fixes: the integrand was defined under an obfuscated name while the loop
    # read `f` undefined, and its return annotation wrongly said `str`.

    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
        i *= 10
| 273 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module constants for the ALBERT SentencePiece tokenizer.
# Fixes: every constant was rebound to `lowerCamelCase`, so the tokenizer class
# below read `logger`, `VOCAB_FILES_NAMES`, the pretrained maps and
# `SPIECE_UNDERLINE` undefined.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

# SentencePiece's word-boundary marker.
SPIECE_UNDERLINE = "▁"
class _a(a_):
    """ALBERT tokenizer backed by SentencePiece.

    Fixes: every method signature repeated `_SCREAMING_SNAKE_CASE` (SyntaxError),
    all methods shared the name `UpperCAmelCase__`, locals were collapsed onto
    one name, and the class attributes were bound to `_a` instead of the names
    the base tokenizer reads. Method/attribute names restored from the names
    the bodies themselves reference and the standard tokenizer hook contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word: include the preceding space.
        # NOTE(review): lstrip/rstrip/normalized flags reconstructed — confirm upstream.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text (whitespace, quotes, accents, case) before tokenizing."""
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('''``''', '''"''').replace('''\'\'''', '''"''')

        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''', outputs)
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize with SentencePiece, re-splitting pieces like `9,` so digits
        and the trailing comma become separate pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''''''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join pieces back to text, decoding runs between special tokens."""
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_a + sep

    def get_special_tokens_mask(self, token_ids_a, token_ids_a_a=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True
            )

        if token_ids_a_a is not None:
            return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_a_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy (or re-serialize) the SentencePiece model into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 131 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
# Module constants for the SqueezeBERT fast tokenizer.
# Fixes: every constant was rebound to `__A`, so the tokenizer class below read
# `VOCAB_FILES_NAMES`, the pretrained maps, and `PRETRAINED_INIT_CONFIGURATION`
# undefined.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class A_(a_):
    """Fast (tokenizers-backed) SqueezeBERT tokenizer.

    Fixes: `__init__` and the sequence-pair helpers repeated `_A` in their
    signatures (SyntaxError), locals were collapsed onto one name so the
    backend normalizer state was never written back, and the three `_lowercase`
    methods shadowed each other — restored to the hook names the fast-tokenizer
    base class calls.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its recorded options disagree with
        # the options requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """[CLS] A [SEP] (+ B [SEP] when a second sequence is given)."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a_a:
            output += token_ids_a_a + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 273 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _lowerCamelCase ( a_ ):
    """Longformer model configuration (hyper-parameter container)."""

    UpperCAmelCase_ : Any ="longformer"

    def __init__(
        self,
        attention_window=512,
        sep_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        onnx_export=False,
        **kwargs,
    ):
        """Store the hyper-parameters; extra kwargs go to the base config.

        Fix: every parameter was collapsed to the duplicated name
        ``UpperCAmelCase`` (a SyntaxError) and values were bound to throwaway
        locals; names restored from the defaults and the assignment order,
        and stored on ``self`` as a config object requires.
        """
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class _lowerCamelCase ( a_ ):
    """ONNX export configuration for the Longformer model.

    NOTE(review): all methods below share the name ``UpperCAmelCase`` and
    shadow each other; upstream they are presumably the ``inputs`` /
    ``outputs`` / ``atol_for_validation`` / ``default_onnx_opset`` properties
    and ``generate_dummy_inputs``.  Names kept as found.
    """

    def __init__(self, config, task="default", patching_specs=None):
        """Wrap the base ONNX config and flag the model for ONNX export.

        Fix: the three parameters were collapsed into one duplicated name
        (a SyntaxError); restored from the ``super().__init__`` call order.
        """
        super().__init__(config, task, patching_specs)
        # Switch the model into its ONNX-export-friendly code path.
        config.onnx_export = True

    @property
    def UpperCAmelCase ( self ):
        """Input axes exported to ONNX (includes Longformer's global attention mask)."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ] )

    @property
    def UpperCAmelCase ( self ):  # noqa: F811
        """Output axes; the default task additionally exposes the pooled output."""
        outputs = super().outputs
        if self.task == "default":
            # presumably the pooled-output entry -- TODO confirm the key name
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def UpperCAmelCase ( self ):  # noqa: F811
        """Absolute tolerance used when validating the exported model."""
        return 1E-4

    @property
    def UpperCAmelCase ( self ):  # noqa: F811
        """Minimum ONNX opset: at least 14 for this model."""
        return max(super().default_onnx_opset , 14 )

    def UpperCAmelCase ( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None ):  # noqa: F811
        """Generate dummy inputs and attach a ``global_attention_mask``.

        Fix: parameters were collapsed into one duplicated name, and the mask
        tensor writes were lost (bound to unused locals).
        """
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"] )
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
| 326 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__A : int = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    """Rewrite T5X/flax parameter names in-place to HF SwitchTransformers names.

    Applies the regex layer renumbering, then ``MOE_LAYER_NAME_MAPPING``,
    transposes the relative-attention biases, and splits stacked expert
    weights into one key per expert.  Returns the (mutated) dict.

    Fix: the original collapsed ``layer_to_block_of_layer`` / ``new_key`` /
    ``groups`` / ``expert_weights`` into reused mangled names (several
    undefined references, plus the ``expert_weihts`` typo), and the def name
    did not match the ``rename_keys`` call site below.
    """
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked weights.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                # NOTE(review): per-expert key pattern restored from the
                # upstream conversion script -- confirm "experts/expert_{idx}/".
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
__A : Optional[int] = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    """Parse a T5X gin config file into a ``SwitchTransformersConfig``.

    Scans ``param = value`` lines, maps the names through
    ``GIN_TO_CONFIG_MAPPING``, extracts the activation tuple, and injects
    ``num_experts``.

    Fix: the original collapsed the parameters into one duplicated name
    (a SyntaxError) and all locals into ``UpperCAmelCase``; the def name did
    not match the ``convert_gin_to_config`` call site below.
    """
    # The upstream script shadows the stdlib ``re`` with the third-party
    # ``regex`` package here; kept for fidelity.
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            # Numeric gin values: float when a '.' is present, int otherwise.
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Convert a T5X/flax Switch-Transformers checkpoint to a PyTorch model.

    Builds the config (from a gin file when given, otherwise from
    ``config_file``), renames the flattened flax params, loads them into the
    PyTorch model, and saves it to ``pytorch_dump_path``.

    Fix: parameters were collapsed into one duplicated name (a SyntaxError)
    and locals into ``UpperCAmelCase``; the def name did not match the
    ``convert_flax_checkpoint_to_pytorch`` call in ``__main__``.
    """
    print(f"""Loading flax weights from : {flax_checkpoint_path}""")
    flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # Fix: the parser/args objects were bound to the throwaway name ``__A``
    # while the code below read ``parser``/``args`` (NameError), and argparse
    # derives the attribute from the option string, so the checkpoint path is
    # ``switch_t5x_checkpoint_path``, not ``switch_tax_checkpoint_path``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 273 | 0 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Project Euler #6: square of the sum minus the sum of squares of 1..n.

    Fix: the parameter was named ``__SCREAMING_SNAKE_CASE`` while the body
    read ``n`` (NameError), and the def name did not match the ``solution()``
    call in ``__main__``.

    >>> solution(10)
    2640
    """
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 217 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
    """Shared fixture/test mixin for DeepFloyd-IF pipeline tests.

    NOTE(review): this block is left byte-identical because it cannot be
    restored with confidence -- local variables were mangled to
    ``UpperCAmelCase`` (each rebinding shadows the previous one) and several
    call arguments to the bare name ``_A`` (undefined here, e.g.
    ``thresholding=_A``), while the return dicts and assertions still
    reference the pre-mangling names (``text_encoder``, ``prompt_embeds``,
    ``max_diff``, ...).  As written it raises NameError at runtime.
    """

    def _lowercase ( self ):
        """Build tiny deterministic components for a text-to-image IF pipeline."""
        # Each component is seeded separately so tests are reproducible.
        torch.manual_seed(0 )
        UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
        torch.manual_seed(0 )
        UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
        torch.manual_seed(0 )
        UpperCAmelCase = UNetaDConditionModel(
            sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
                '''ResnetDownsampleBlock2D''',
                '''SimpleCrossAttnDownBlock2D''',
            ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
        torch.manual_seed(0 )
        UpperCAmelCase = IFWatermarker()
        # NOTE(review): these names are undefined after the mangling above.
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _lowercase ( self ):
        """Build tiny components for the IF super-resolution pipeline.

        Same as above but with a 6-channel UNet, timestep class embeddings,
        and an extra image-noising scheduler.
        """
        torch.manual_seed(0 )
        UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
        torch.manual_seed(0 )
        UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
        torch.manual_seed(0 )
        UpperCAmelCase = UNetaDConditionModel(
            sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
                '''ResnetDownsampleBlock2D''',
                '''SimpleCrossAttnDownBlock2D''',
            ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , )
        torch.manual_seed(0 )
        UpperCAmelCase = IFWatermarker()
        # NOTE(review): these names are undefined after the mangling above.
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _lowercase ( self ):
        """Check save/load round-trip when all optional components are None.

        Encodes the prompt to embeddings, nulls every optional component,
        saves/reloads the pipeline, and asserts the outputs match to 1e-4.
        """
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase = self.get_dummy_inputs(_A )
        UpperCAmelCase = inputs['''prompt''']
        UpperCAmelCase = inputs['''generator''']
        UpperCAmelCase = inputs['''num_inference_steps''']
        UpperCAmelCase = inputs['''output_type''']
        # Image-style inputs are optional depending on the concrete pipeline.
        if "image" in inputs:
            UpperCAmelCase = inputs['''image''']
        else:
            UpperCAmelCase = None
        if "mask_image" in inputs:
            UpperCAmelCase = inputs['''mask_image''']
        else:
            UpperCAmelCase = None
        if "original_image" in inputs:
            UpperCAmelCase = inputs['''original_image''']
        else:
            UpperCAmelCase = None
        UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(_A )
        # inputs with prompt converted to embeddings
        UpperCAmelCase = {
            '''prompt_embeds''': prompt_embeds,
            '''negative_prompt_embeds''': negative_prompt_embeds,
            '''generator''': generator,
            '''num_inference_steps''': num_inference_steps,
            '''output_type''': output_type,
        }
        if image is not None:
            UpperCAmelCase = image
        if mask_image is not None:
            UpperCAmelCase = mask_image
        if original_image is not None:
            UpperCAmelCase = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(_A , _A , _A )
        UpperCAmelCase = pipe(**_A )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(_A )
            UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
        pipe_loaded.to(_A )
        pipe_loaded.set_progress_bar_config(disable=_A )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(_A , _A ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
        UpperCAmelCase = self.get_dummy_inputs(_A )
        UpperCAmelCase = inputs['''generator''']
        UpperCAmelCase = inputs['''num_inference_steps''']
        UpperCAmelCase = inputs['''output_type''']
        # inputs with prompt converted to embeddings
        UpperCAmelCase = {
            '''prompt_embeds''': prompt_embeds,
            '''negative_prompt_embeds''': negative_prompt_embeds,
            '''generator''': generator,
            '''num_inference_steps''': num_inference_steps,
            '''output_type''': output_type,
        }
        if image is not None:
            UpperCAmelCase = image
        if mask_image is not None:
            UpperCAmelCase = mask_image
        if original_image is not None:
            UpperCAmelCase = original_image
        UpperCAmelCase = pipe_loaded(**_A )[0]
        UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
        self.assertLess(_A , 1E-4 )

    def _lowercase ( self ):
        """Check a plain save/load round-trip reproduces the pipeline output."""
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase = self.get_dummy_inputs(_A )
        UpperCAmelCase = pipe(**_A )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(_A )
            UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
        pipe_loaded.to(_A )
        pipe_loaded.set_progress_bar_config(disable=_A )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        UpperCAmelCase = self.get_dummy_inputs(_A )
        UpperCAmelCase = pipe_loaded(**_A )[0]
        UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
        self.assertLess(_A , 1E-4 )
| 273 | 0 |
def longest_common_subsequence(x: str, y: str) -> tuple:
    """Return ``(length, subsequence)`` of the longest common subsequence of x and y.

    Classic O(m*n) dynamic programming table followed by a backward trace to
    reconstruct one optimal subsequence.

    Fix: both parameters were collapsed into the duplicated name
    ``_lowerCAmelCase`` (a SyntaxError) while the body read ``x``/``y``, and
    the def name did not match the ``longest_common_subsequence`` call in
    ``__main__``.

    >>> longest_common_subsequence("AGGTAB", "GXTXAYB")
    (4, 'GTAB')
    """
    # Kept from the original: assert-based validation (callers may rely on
    # AssertionError for None inputs).
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # Walk back through the table to rebuild one optimal subsequence.
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq
if __name__ == "__main__":
    # Fix: every local was mangled to ``_lowerCAmelCase`` while the call and
    # the print read ``a``/``b``/``ln``/``subseq`` (NameError).
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
| 300 |
from __future__ import annotations
from collections import namedtuple
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> tuple:
'''simple docstring'''
UpperCAmelCase = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 273 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( a_ ,unittest.TestCase ):
    """Tokenizer tests for the CTRL BPE tokenizer.

    NOTE(review): the four methods below all share the name
    ``UpperCAmelCase`` (and the three class attributes share
    ``lowerCAmelCase``), so later definitions shadow earlier ones; upstream
    they are presumably ``setUp`` / ``get_tokenizer`` /
    ``get_input_output_texts`` / ``test_full_tokenizer``.  Names kept as
    found to preserve the visible interface.
    """

    lowerCAmelCase : List[Any] = CTRLTokenizer
    lowerCAmelCase : List[Any] = False
    lowerCAmelCase : Dict = False

    def UpperCAmelCase ( self ) -> None:
        """Write a tiny BPE vocab/merges pair into the temp dir for the tests.

        Fix: the vocab list, merges and file paths were bound to collapsed
        locals while later code read ``self.special_tokens_map`` /
        ``self.vocab_file`` / ``self.merges_file`` (AttributeError), and the
        zipped vocab referenced the undefined name ``_A``.
        """
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def UpperCAmelCase ( self, **kwargs ) -> Any:  # noqa: F811
        """Instantiate a CTRLTokenizer from the files written by the fixture."""
        # Fix: the parameter was named ``_snake_case`` while the body read
        # ``kwargs`` (NameError).
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def UpperCAmelCase ( self, sequence ) -> tuple:  # noqa: F811
        """Return (input_text, output_text) for the shared round-trip tests."""
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def UpperCAmelCase ( self ) -> None:  # noqa: F811
        """BPE-tokenize a sample string and check tokens and their ids."""
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 16 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the RoFormer model family.
# Fixes vs. the original: (1) each optional-backend addition rebound the
# whole ``__A`` dict instead of adding a key to it, so only the last
# assignment survived; (2) the final ``else:`` body was not indented
# (SyntaxError); (3) the lazy module was bound to a local instead of being
# installed in ``sys.modules``, and it referenced the undefined name
# ``_import_structure`` -- the dict is now named accordingly.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below defers them until attribute access.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 | 0 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
    """Model tester that builds tiny TF ViT configs/inputs for the unit tests.

    NOTE(review): left byte-identical because it cannot be restored with
    confidence -- every ``__init__`` parameter was collapsed to the duplicated
    name ``snake_case`` (a SyntaxError), locals were mangled to ``lowercase``
    (each rebinding shadows the previous one), and several call arguments to
    the bare, undefined name ``_A`` (e.g. ``training=_A``).  The bodies still
    read the pre-mangling names (``parent``, ``batch_size``, ...).
    """

    def __init__( self , snake_case , snake_case=13 , snake_case=30 , snake_case=2 , snake_case=3 , snake_case=True , snake_case=True , snake_case=32 , snake_case=2 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=10 , snake_case=0.02 , snake_case=3 , snake_case=None , ):
        # NOTE(review): assignments below should presumably be ``self.<name> = <name>``.
        lowercase = parent
        lowercase = batch_size
        lowercase = image_size
        lowercase = patch_size
        lowercase = num_channels
        lowercase = is_training
        lowercase = use_labels
        lowercase = hidden_size
        lowercase = num_hidden_layers
        lowercase = num_attention_heads
        lowercase = intermediate_size
        lowercase = hidden_act
        lowercase = hidden_dropout_prob
        lowercase = attention_probs_dropout_prob
        lowercase = type_sequence_label_size
        lowercase = initializer_range
        lowercase = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowercase = (image_size // patch_size) ** 2
        lowercase = num_patches + 1

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Build random pixel values (and labels when used) plus a config."""
        lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowercase = None
        if self.use_labels:
            lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowercase = self.get_config()
        return config, pixel_values, labels

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Return a tiny ViTConfig built from the tester's hyper-parameters."""
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
        """Check TFViTModel output shapes, incl. interpolated position encodings."""
        lowercase = TFViTModel(config=_A )
        lowercase = model(_A , training=_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image with different size than the one specified in config.
        lowercase = self.image_size // 2
        lowercase = pixel_values[:, :, :image_size, :image_size]
        lowercase = model(_A , interpolate_pos_encoding=_A , training=_A )
        lowercase = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
        """Check the classification head, smaller images, and greyscale input."""
        lowercase = self.type_sequence_label_size
        lowercase = TFViTForImageClassification(_A )
        lowercase = model(_A , labels=_A , training=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image with different size than the one specified in config.
        lowercase = self.image_size // 2
        lowercase = pixel_values[:, :, :image_size, :image_size]
        lowercase = model(_A , interpolate_pos_encoding=_A , training=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        lowercase = 1
        lowercase = TFViTForImageClassification(_A )
        lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowercase = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Return (config, inputs_dict) for the common-test machinery."""
        lowercase = self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase = config_and_inputs
        lowercase = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class A_ ( a_ , a_ , unittest.TestCase ):
    """Common test-suite wiring for the TF ViT models.

    NOTE(review): left byte-identical -- call arguments were mangled to the
    undefined name ``_A`` and locals to ``lowercase`` (shadowing each other),
    so the methods reference names that no longer exist as written.
    """

    _UpperCamelCase : Optional[int] = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    _UpperCamelCase : Dict = (
        {"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    _UpperCamelCase : Optional[Any] = False
    _UpperCamelCase : Optional[Any] = False
    _UpperCamelCase : Optional[Any] = False

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Create the model tester and the config tester."""
        lowercase = TFViTModelTester(self )
        lowercase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def SCREAMING_SNAKE_CASE__ ( self ):
        pass

    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def SCREAMING_SNAKE_CASE__ ( self ):
        pass

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Input embeddings must be a Keras layer; output embeddings may be absent."""
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase = model_class(_A )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            lowercase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """The forward signature must start with `pixel_values`."""
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase = model_class(_A )
            lowercase = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase = [*signature.parameters.keys()]
            lowercase = ['pixel_values']
            self.assertListEqual(arg_names[:1] , _A )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Exercise the base-model forward pass."""
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Exercise the image-classification head."""
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_A )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Smoke-test loading the pretrained checkpoint from the Hub."""
        lowercase = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
        self.assertIsNotNone(_A )
def UpperCAmelCase_ ( ):
    """Load the standard COCO cats fixture image used by the integration test."""
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
@require_tf
@require_vision
class A_ ( unittest.TestCase ):
    """Integration test: run google/vit-base-patch16-224 on the COCO fixture image.

    NOTE(review): locals in this class are machine-garbled — values are assigned
    to `lowercase` while later lines read the upstream names (`model`, `outputs`)
    and `_A` stands in for `image`/`inputs`/expected values. Code left
    byte-identical pending de-obfuscation.
    """

    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Default image processor for the checkpoint; None when vision extras
        # are not installed.
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        lowercase = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
        lowercase = self.default_image_processor
        lowercase = prepare_img()
        lowercase = image_processor(images=_A , return_tensors='tf' )
        # forward pass
        lowercase = model(**_A )
        # verify the logits (ImageNet-1k head => 1000 classes)
        lowercase = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _A )
        lowercase = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 )
| 195 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """Map a key from the original YOSO checkpoint to its HF `YosoForMaskedLM` name.

    Fix vs. the garbled block: the three conversion functions were all named
    `__SCREAMING_SNAKE_CASE` (each shadowing the previous), while their bodies
    and the `__main__` section call `rename_key`-style logic,
    `convert_checkpoint_helper` and `convert_yoso_checkpoint`; the definitions
    are renamed to match those call sites, and the parameter (read as
    `orig_key` throughout the body) is named accordingly.
    """
    if "model" in orig_key:
        orig_key = orig_key.replace('''model.''' , '''''' )
    if "norm1" in orig_key:
        orig_key = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
    if "norm2" in orig_key:
        orig_key = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
    if "norm" in orig_key:
        orig_key = orig_key.replace('''norm''' , '''LayerNorm''' )
    if "transformer" in orig_key:
        # layer index is the trailing number of the `transformer_<n>` prefix
        layer_num = orig_key.split('''.''' )[0].split('''_''' )[-1]
        orig_key = orig_key.replace(F"""transformer_{layer_num}""" , F"""encoder.layer.{layer_num}""" )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace('''mha.attn''' , '''attention.self''' )
    if "mha" in orig_key:
        orig_key = orig_key.replace('''mha''' , '''attention''' )
    if "W_q" in orig_key:
        orig_key = orig_key.replace('''W_q''' , '''self.query''' )
    if "W_k" in orig_key:
        orig_key = orig_key.replace('''W_k''' , '''self.key''' )
    if "W_v" in orig_key:
        orig_key = orig_key.replace('''W_v''' , '''self.value''' )
    if "ff1" in orig_key:
        orig_key = orig_key.replace('''ff1''' , '''intermediate.dense''' )
    if "ff2" in orig_key:
        orig_key = orig_key.replace('''ff2''' , '''output.dense''' )
    if "ff" in orig_key:
        orig_key = orig_key.replace('''ff''' , '''output.dense''' )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
    if "mlm" in orig_key:
        orig_key = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
    if "cls" not in orig_key:
        # everything outside the MLM head lives under the `yoso.` encoder prefix
        orig_key = '''yoso.''' + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rewrite the raw state dict in place to HF naming and return it.

    Fixes: popped values were assigned to discarded locals, so the renamed
    entries (and the derived `cls.predictions.bias` / position-ids tensors)
    were never written back into the dict.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            # pooler / sentence-classification weights are not used by YosoForMaskedLM
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    # the decoder bias doubles as the prediction-head bias
    orig_state_dict['''cls.predictions.bias'''] = orig_state_dict['''cls.predictions.decoder.bias''']
    # position ids are offset by 2 (padding_idx convention)
    orig_state_dict['''yoso.embeddings.position_ids'''] = torch.arange(max_position_embeddings).expand((1, -1) ) + 2
    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load the original YOSO checkpoint, convert it, and save an HF model.

    :param checkpoint_path: path to the original torch checkpoint
    :param yoso_config_file: JSON file with the YosoConfig
    :param pytorch_dump_path: output directory for ``save_pretrained``
    """
    orig_state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model_state_dict''']
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict)
    print(model.load_state_dict(new_state_dict) )
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )


if __name__ == "__main__":
    __A : Any = argparse.ArgumentParser()
    # Required parameters
    __A.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    __A.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    __A.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = __A.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 273 | 0 |
'''simple docstring'''
def a__ ( lowercase : int ) -> bool:
"""simple docstring"""
if not isinstance(UpperCamelCase__, UpperCamelCase__ ):
_UpperCamelCase = F"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCamelCase__ )
if number < 0:
return False
_UpperCamelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 324 |
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
UpperCAmelCase = _modexpt(UpperCamelCase__ , exponent // 2 , UpperCamelCase__ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(UpperCamelCase__ , exponent - 1 , UpperCamelCase__ )) % modulo_value
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ = 1777 , UpperCamelCase__ = 1855 , UpperCamelCase__ = 8 ) -> int:
'''simple docstring'''
UpperCAmelCase = base
for _ in range(1 , UpperCamelCase__ ):
UpperCAmelCase = _modexpt(UpperCamelCase__ , UpperCamelCase__ , 10**digits )
return result
if __name__ == "__main__":
print(F'{solution() = }')
| 273 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 244 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__A : Dict = logging.get_logger(__name__)
# NOTE(review): both module-level names below were garbled to `__A`, so the
# logger created above is immediately overwritten by this URL map. Upstream
# these are presumably `logger` and the pretrained-config archive map —
# TODO confirm and de-obfuscate.
__A : str = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}
class A_ (PretrainedConfig ):
    """Configuration class for Longformer models.

    Fixes vs. the garbled block: the base class `a_` was undefined — the
    `super().__init__(pad_token_id=...)` call is the `PretrainedConfig`
    contract (imported above); the `__init__` parameter list reused `_A` for
    every parameter (a SyntaxError) while the body read the real names
    (`attention_window`, `sep_token_id`, ...), which ground the restored
    signature; and every `self.` assignment target had been dropped.
    """

    # NOTE(review): upstream this attribute is `model_type = "longformer"`;
    # the garbled attribute name is kept to avoid changing the class interface.
    UpperCAmelCase__ = '''longformer'''

    def __init__(
        self,
        attention_window = 5_1_2,
        sep_token_id = 2,
        pad_token_id = 1,
        bos_token_id = 0,
        eos_token_id = 2,
        vocab_size = 3_0_5_2_2,
        hidden_size = 7_6_8,
        num_hidden_layers = 1_2,
        num_attention_heads = 1_2,
        intermediate_size = 3_0_7_2,
        hidden_act = "gelu",
        hidden_dropout_prob = 0.1,
        attention_probs_dropout_prob = 0.1,
        max_position_embeddings = 5_1_2,
        type_vocab_size = 2,
        initializer_range = 0.02,
        layer_norm_eps = 1E-12,
        onnx_export = False,
        **kwargs,
    ):
        """Build the config; unrecognised kwargs are forwarded to PretrainedConfig."""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class A_ (OnnxConfig ):
    """ONNX export configuration for Longformer.

    Fixes vs. the garbled block: the base class `a_` was undefined — the body
    calls `super().outputs`, `super().default_onnx_opset` and
    `super().generate_dummy_inputs(...)`, i.e. the `OnnxConfig` API imported
    above; `__init__` repeated the parameter name `_A` (a SyntaxError); and the
    property/method names (all `_lowercase`) shadowed one another — they are
    restored from the `super()` members they override. Assignment targets in
    `generate_dummy_inputs` are grounded by the surviving in-code comments
    that mention `inputs["global_attention_mask"]`.
    """

    def __init__( self , config , task = "default" , patching_specs = None ):
        """Wrap a model config for ONNX export and force onnx_export mode on."""
        super().__init__(config , task , patching_specs )
        config.onnx_export = True

    @property
    def inputs( self ):
        """ONNX input axes; choice axis only for multiple-choice tasks."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''global_attention_mask''', dynamic_axis),
            ] )

    @property
    def outputs( self ):
        """Base outputs, plus a pooler output for the default task."""
        outputs = super().outputs
        if self.task == "default":
            # NOTE(review): upstream the extra entry is outputs["pooler_output"]
            # — restored from the reference implementation, confirm on export.
            outputs['''pooler_output'''] = {0: '''batch'''}
        return outputs

    @property
    def atol_for_validation( self ):
        """Absolute tolerance used when validating the exported model."""
        return 1E-4

    @property
    def default_onnx_opset( self ):
        """Longformer export needs at least opset 14 (tril/triu support)."""
        return max(super().default_onnx_opset , 1_4 )

    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        """Dummy inputs for export, extended with a global attention mask."""
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs['''global_attention_mask'''] = torch.zeros_like(inputs['''input_ids'''] )
        # make every second token global
        inputs['''global_attention_mask'''][:, ::2] = 1
        return inputs
| 273 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
# Fix vs. the garbled block: all three module-level names were assigned to
# `snake_case`, overwriting each other, while the code below reads `logger`,
# `IMAGE_PROCESSOR_MAPPING_NAMES` and `IMAGE_PROCESSOR_MAPPING` — the names
# are restored to match those uses.
logger = logging.get_logger(__name__)

# model_type -> image-processor class name (single name per model type)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ('''align''', '''EfficientNetImageProcessor'''),
        ('''beit''', '''BeitImageProcessor'''),
        ('''bit''', '''BitImageProcessor'''),
        ('''blip''', '''BlipImageProcessor'''),
        ('''blip-2''', '''BlipImageProcessor'''),
        ('''bridgetower''', '''BridgeTowerImageProcessor'''),
        ('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
        ('''clip''', '''CLIPImageProcessor'''),
        ('''clipseg''', '''ViTImageProcessor'''),
        ('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
        ('''convnext''', '''ConvNextImageProcessor'''),
        ('''convnextv2''', '''ConvNextImageProcessor'''),
        ('''cvt''', '''ConvNextImageProcessor'''),
        ('''data2vec-vision''', '''BeitImageProcessor'''),
        ('''deformable_detr''', '''DeformableDetrImageProcessor'''),
        ('''deit''', '''DeiTImageProcessor'''),
        ('''deta''', '''DetaImageProcessor'''),
        ('''detr''', '''DetrImageProcessor'''),
        ('''dinat''', '''ViTImageProcessor'''),
        ('''donut-swin''', '''DonutImageProcessor'''),
        ('''dpt''', '''DPTImageProcessor'''),
        ('''efficientformer''', '''EfficientFormerImageProcessor'''),
        ('''efficientnet''', '''EfficientNetImageProcessor'''),
        ('''flava''', '''FlavaImageProcessor'''),
        ('''focalnet''', '''BitImageProcessor'''),
        ('''git''', '''CLIPImageProcessor'''),
        ('''glpn''', '''GLPNImageProcessor'''),
        ('''groupvit''', '''CLIPImageProcessor'''),
        ('''imagegpt''', '''ImageGPTImageProcessor'''),
        ('''instructblip''', '''BlipImageProcessor'''),
        ('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
        ('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
        ('''levit''', '''LevitImageProcessor'''),
        ('''mask2former''', '''Mask2FormerImageProcessor'''),
        ('''maskformer''', '''MaskFormerImageProcessor'''),
        ('''mgp-str''', '''ViTImageProcessor'''),
        ('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
        ('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        # dropped a duplicated ('mobilevit', ...) entry present in the original
        ('''mobilevit''', '''MobileViTImageProcessor'''),
        ('''mobilevitv2''', '''MobileViTImageProcessor'''),
        ('''nat''', '''ViTImageProcessor'''),
        ('''oneformer''', '''OneFormerImageProcessor'''),
        ('''owlvit''', '''OwlViTImageProcessor'''),
        ('''perceiver''', '''PerceiverImageProcessor'''),
        ('''pix2struct''', '''Pix2StructImageProcessor'''),
        ('''poolformer''', '''PoolFormerImageProcessor'''),
        ('''regnet''', '''ConvNextImageProcessor'''),
        ('''resnet''', '''ConvNextImageProcessor'''),
        ('''sam''', '''SamImageProcessor'''),
        ('''segformer''', '''SegformerImageProcessor'''),
        ('''swiftformer''', '''ViTImageProcessor'''),
        ('''swin''', '''ViTImageProcessor'''),
        ('''swin2sr''', '''Swin2SRImageProcessor'''),
        ('''swinv2''', '''ViTImageProcessor'''),
        ('''table-transformer''', '''DetrImageProcessor'''),
        ('''timesformer''', '''VideoMAEImageProcessor'''),
        ('''tvlt''', '''TvltImageProcessor'''),
        ('''upernet''', '''SegformerImageProcessor'''),
        ('''van''', '''ConvNextImageProcessor'''),
        ('''videomae''', '''VideoMAEImageProcessor'''),
        ('''vilt''', '''ViltImageProcessor'''),
        ('''vit''', '''ViTImageProcessor'''),
        ('''vit_hybrid''', '''ViTHybridImageProcessor'''),
        ('''vit_mae''', '''ViTImageProcessor'''),
        ('''vit_msn''', '''ViTImageProcessor'''),
        ('''xclip''', '''CLIPImageProcessor'''),
        ('''yolos''', '''YolosImageProcessor'''),
    ]
)

# lazy config-class -> image-processor-class mapping used by the Auto class below
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name ):
    """Resolve an image-processor class object from its class name, or None.

    Fix vs. the garbled block: the function was named `__lowercase` (shadowed
    by the next definition) although the Auto class below calls
    `image_processor_class_from_name`, and the body read `class_name` /
    `UpperCamelCase__` while the parameter had a different name.
    """
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            inner_module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(F'.{inner_module_name}' , 'transformers.models' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                # class not exported by this sub-module; keep searching
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , '__name__' , None ) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )

    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir = None,
    force_download = False,
    resume_download = False,
    proxies = None,
    use_auth_token = None,
    revision = None,
    local_files_only = False,
    **kwargs,
):
    """Load the image-processor config dict for a model from the Hub or disk.

    Returns ``{}`` when no dedicated image-processor file exists.

    Fix vs. the garbled block: every parameter shared the name
    `__lowerCAmelCase` (a SyntaxError) and the function shadowed the previous
    `__lowercase` definition; the signature is restored from the keyword
    arguments the body forwards to `get_file_from_repo`.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the image processor configuration file, will try to use the model config instead.' )
        return {}

    with open(resolved_config_file , encoding='utf-8' ) as reader:
        return json.load(reader )
class snake_case_ :
    r"""Auto factory that instantiates the right image processor for a checkpoint.

    NOTE(review): this class was heavily garbled — every local was assigned to
    `a__` and both methods were named `lowerCamelCase__`. The method names are
    restored from grounded evidence: the constructor's error message names
    `from_pretrained`, and the second method delegates to
    `IMAGE_PROCESSOR_MAPPING.register`. Local names are restored from the
    attribute/key accesses that survive in the body (`config_dict`,
    `image_processor_class`, ...); lost assignment targets were reconstructed
    from those later reads and should be confirmed against upstream.
    """

    def __init__( self ) -> None:
        # This class is a pure factory and must never be instantiated directly.
        raise EnvironmentError(
            'AutoImageProcessor is designed to be instantiated '
            'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """Instantiate the image processor class matching the checkpoint config."""
        config = kwargs.pop('config' , None )
        trust_remote_code = kwargs.pop('trust_remote_code' , None )
        kwargs['_from_auto'] = True

        config_dict , _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path , **kwargs )
        image_processor_class = config_dict.get('image_processor_type' , None )
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
            image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('feature_extractor_type' , None )
            if feature_extractor_class is not None:
                logger.warning(
                    'Could not find image processor class in the image processor config or the model config. Loading'
                    ' based on pattern matching with the model\'s feature extractor configuration.' )
                image_processor_class = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
            if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
                feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
                image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
                logger.warning(
                    'Could not find image processor auto map in the image processor config or the model config.'
                    ' Loading based on pattern matching with the model\'s feature extractor configuration.' )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config , 'image_processor_type' , None )
            if hasattr(config , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['AutoImageProcessor']

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class )

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config ) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop('code_revision' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict , **kwargs )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config ) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config )]
            return image_processor_class.from_dict(config_dict , **kwargs )

        raise ValueError(
            F'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '
            F'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '
            F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' )

    @staticmethod
    def register( config_class , image_processor_class ):
        """Register a new (config class, image processor class) pair."""
        IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class )
| 240 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A_ (DiffusionPipeline ):
    """Karras et al. (2022) stochastic sampler for unconditional image generation.

    Fixes vs. the garbled block: the base class `a_` was undefined — the body
    uses `register_modules` / `progress_bar` / `numpy_to_pil`, i.e. the
    `DiffusionPipeline` API imported above; `__call__` repeated the parameter
    name `_A` (a SyntaxError); and every local assignment target was dropped
    while later lines read the real names (`img_size`, `sample`, `sigma_hat`,
    `step_output`, ...), which ground the restored bindings.
    """

    # NOTE(review): upstream these are module type annotations; the garbled
    # block assigned the placeholder literal 42 twice to one attribute name.
    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__( self , unet , scheduler ):
        """Register the denoising UNet and the Karras-VE scheduler."""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 5_0 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ):
        """Run the sampling loop and return generated images."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape , generator=generator , device=self.device ) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat , sigma_hat = self.scheduler.add_noise_to_input(sample , sigma , generator=generator )

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output , sigma_hat , sigma_prev , sample_hat )

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                step_output = self.scheduler.step_correct(
                    model_output , sigma_hat , sigma_prev , sample_hat , step_output.prev_sample , step_output['''derivative'''] , )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0 , 1 )
        image = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
| 273 | 0 |
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
if exponent == 1:
return base
if exponent % 2 == 0:
__UpperCamelCase : str = _modexpt(UpperCamelCase__ , exponent // 2 , UpperCamelCase__ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(UpperCamelCase__ , exponent - 1 , UpperCamelCase__ )) % modulo_value
def __lowerCAmelCase ( snake_case__ = 1_777 , snake_case__ = 1_855 , snake_case__ = 8 ):
__UpperCamelCase : int = base
for _ in range(1 , UpperCamelCase__ ):
__UpperCamelCase : Optional[Any] = _modexpt(UpperCamelCase__ , UpperCamelCase__ , 10**digits )
return result
if __name__ == "__main__":
print(f'{solution() = }')
| 298 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
# Shared module-level RNG: the floats_list helper below falls back to
# `global_rng` when no explicit rng is supplied, so the garbled name `__A`
# (and its wrong `str` annotation) is restored to `global_rng`.
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Create a `shape[0] x shape[1]` nested list of random floats in [0, scale).

    Fix vs. the garbled block: the function was named `__SCREAMING_SNAKE_CASE`
    although the test helpers below call `floats_list`, and all four
    parameters shared one name (a SyntaxError); the body reads `shape`,
    `scale` and `rng`, which ground the restored signature. `name` is accepted
    for API compatibility and unused.
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values
@require_torch
class A_ (unittest.TestCase ):
    """Helper that builds configs and dummy inputs for the SpeechT5 feature-extractor tests.

    Fixes vs. the garbled block: `__init__` repeated the parameter name `_A`
    (a SyntaxError) while the body read the real names (`parent`,
    `batch_size`, ...) and the `self.` assignment targets were dropped — the
    restored signature is grounded by those reads and by the dict returned
    below. The three methods all shared the name `_lowercase`; they are
    restored to the names the test class below calls
    (`prepare_feat_extract_dict`, `prepare_inputs_for_target`) and the
    standard `prepare_inputs_for_common`.
    """

    def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , feature_size=1 , padding_value=0.0 , sampling_rate=1_6_0_0_0 , do_normalize=True , num_mel_bins=8_0 , hop_length=1_6 , win_length=6_4 , win_function="hann_window" , fmin=8_0 , fmax=7_6_0_0 , mel_floor=1E-10 , return_attention_mask=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # step that makes input lengths strictly increase across the batch
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict( self ):
        """Keyword arguments for constructing a SpeechTaFeatureExtractor."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        """Waveform-style inputs: lists of floats of (optionally equal) length."""
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]

        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target( self , equal_length=False , numpify=False ):
        """Spectrogram-style targets: (length, num_mel_bins) float matrices."""
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]

        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]

        return speech_inputs
@require_torch
class A_ (a_ , unittest.TestCase ):
UpperCAmelCase__ = SpeechTaFeatureExtractor
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = SpeechTaFeatureExtractionTester(self )
def _lowercase ( self , _A ):
'''simple docstring'''
self.assertTrue(np.all(np.mean(_A , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_A , axis=0 ) - 1 ) < 1E-3 ) )
    def _lowercase ( self ):
        """Smoke-test __call__ on waveform inputs, non-batched vs batched.

        NOTE(review): this method is machine-garbled — locals are assigned to
        `UpperCAmelCase` while later lines read the upstream names
        (`feat_extract`, `speech_inputs`, ...), and `_A` stands in for the two
        encodings being compared. Code left byte-identical.
        """
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
        # Test not batched input
        UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
        # Test batched
        UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
        UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
    def _lowercase ( self ):
        """Exercise zero-mean/unit-variance normalization under padding strategies.

        NOTE(review): garbled — same `UpperCAmelCase` / `_A` placeholder issue
        as above. Code left byte-identical.
        """
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
        UpperCAmelCase = [None, 1_6_0_0, None]
        for max_length, padding in zip(_A , _A ):
            UpperCAmelCase = feat_extract(_A , padding=_A , max_length=_A , return_tensors='''np''' )
            UpperCAmelCase = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
            self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
    def _lowercase ( self ):
        """Same normalization check but without return_tensors.

        NOTE(review): garbled — `UpperCAmelCase` targets / `_A` placeholders;
        code left byte-identical.
        """
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in lengths]
        UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
        UpperCAmelCase = [None, 1_6_0_0, None]
        for max_length, padding in zip(_A , _A ):
            UpperCAmelCase = feat_extract(_A , max_length=_A , padding=_A )
            UpperCAmelCase = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
            self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
    def _lowercase ( self ):
        """Truncation with padding='max_length'.

        NOTE(review): garbled — `UpperCAmelCase` targets / `_A` placeholders;
        code left byte-identical.
        """
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = feat_extract(
            _A , truncation=_A , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
        UpperCAmelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )
    def _lowercase ( self ):
        """Truncation with padding='longest' (pads to max_length or longest input).

        NOTE(review): garbled — `UpperCAmelCase` targets / `_A` placeholders;
        code left byte-identical.
        """
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = feat_extract(
            _A , truncation=_A , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
        UpperCAmelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1_0_0_0) )
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = feat_extract(
            _A , truncation=_A , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
        UpperCAmelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1_2_0_0) )
    def _lowercase ( self ):
        """Padding must keep float32 dtype for both numpy and torch tensors.

        NOTE(review): garbled — `UpperCAmelCase` targets (upstream:
        `feature_extractor`, `np_speech_inputs`, ...); code left byte-identical.
        """
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
        UpperCAmelCase = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def _lowercase ( self ):
        """__call__ on spectrogram targets: feature size, batching, 2-D arrays.

        NOTE(review): garbled — `UpperCAmelCase` targets / `_A` placeholders;
        code left byte-identical.
        """
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
        # Test feature size
        UpperCAmelCase = feature_extractor(audio_target=_A , padding=_A , return_tensors='''np''' ).input_values
        self.assertTrue(input_values.ndim == 3 )
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
        # Test not batched input
        UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
        UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
        # Test batched
        UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
        UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        UpperCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        UpperCAmelCase = np.asarray(_A )
        UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
        UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
    def _lowercase ( self ):
        """BatchFeature wrapping of target inputs (numpy tensor type).

        NOTE(review): garbled — `UpperCAmelCase` targets / `_A` placeholders;
        code left byte-identical.
        """
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCAmelCase = feat_extract.model_input_names[0]
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(_A ) == len(_A ) for x, y in zip(_A , processed_features[input_name] ) ) )
        UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
        UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
        UpperCAmelCase = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            UpperCAmelCase = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowercase ( self ):
    """Equal-length target inputs convert to a dense PyTorch tensor batch
    (tensor_type='pt') of shape (batch, seq_len, num_mel_bins)."""
    # NOTE(review): bindings are mangled — everything is assigned to
    # `UpperCAmelCase` while later lines read `speech_inputs`, `feat_extract`,
    # `input_name`, `processed_features`, `batch_features_input`, `_A`.
    UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
    UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
    UpperCAmelCase = feat_extract.model_input_names[0]
    UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
    UpperCAmelCase = processed_features[input_name]
    # Normalize a 2-D result to 3-D before checking the batch shape.
    if len(batch_features_input.shape ) < 3:
        UpperCAmelCase = batch_features_input[:, :, None]
    self.assertTrue(
        batch_features_input.shape
        == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowercase ( self ):
    """Padding targets to 'longest' must give numerically equivalent results
    (sum within 1e-2) whether numpy or torch tensors are requested."""
    # NOTE(review): mangled bindings — later lines read `feat_extract`,
    # `speech_inputs`, `input_name`, `processed_features`, `input_np`,
    # `input_pt`, none of which is bound under these assignments.
    UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
    UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
    UpperCAmelCase = feat_extract.model_input_names[0]
    UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
    UpperCAmelCase = feat_extract.num_mel_bins # hack!
    UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )[input_name]
    UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''pt''' )[input_name]
    self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def _lowercase ( self ):
    """With return_attention_mask enabled, padding to 'longest' must return an
    attention_mask whose per-row sums equal the original input lengths."""
    # NOTE(review): mangled bindings — later lines read `feat_extract`,
    # `speech_inputs`, `input_name`, `processed`, `input_lengths`, `_A`,
    # which are never bound by these `UpperCAmelCase` assignments.
    UpperCAmelCase = self.feat_extract_dict
    UpperCAmelCase = True
    UpperCAmelCase = self.feature_extraction_class(**_A )
    UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
    UpperCAmelCase = [len(_A ) for x in speech_inputs]
    UpperCAmelCase = feat_extract.model_input_names[0]
    UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
    UpperCAmelCase = feat_extract.num_mel_bins # hack!
    UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )
    self.assertIn('''attention_mask''' , _A )
    self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
    self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _A )
def _lowercase ( self ):
    """Padding with truncation to the shortest input length must clip every
    row to max_length and produce a fully-set attention mask."""
    # NOTE(review): mangled bindings — later lines read `feat_extract`,
    # `speech_inputs`, `input_name`, `max_length`, `processed_pad`, `_A`,
    # which are never bound by these `UpperCAmelCase` assignments.
    UpperCAmelCase = self.feat_extract_dict
    UpperCAmelCase = True
    UpperCAmelCase = self.feature_extraction_class(**_A )
    UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
    UpperCAmelCase = [len(_A ) for x in speech_inputs]
    UpperCAmelCase = feat_extract.model_input_names[0]
    UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
    # Truncate everything to the shortest sequence in the batch.
    UpperCAmelCase = min(_A )
    UpperCAmelCase = feat_extract.num_mel_bins # hack!
    UpperCAmelCase = feat_extract.pad(
        _A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''np''' )
    self.assertIn('''attention_mask''' , _A )
    self.assertListEqual(
        list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
    self.assertListEqual(
        processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def _lowercase ( self , _A ):
    """Load the first `_A` raw waveforms from the dummy LibriSpeech dataset.

    Args:
        _A: number of audio samples to load.

    Returns:
        List of 1-D numpy arrays with the decoded audio, sorted by sample id
        for determinism.
    """
    from datasets import load_dataset

    # Fix: the original left `ds`/`speech_samples` unbound (everything was
    # assigned to `UpperCAmelCase`) and sliced with the undefined name
    # `num_samples`; the sample count is this method's `_A` parameter.
    ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
    # automatic decoding with librispeech
    speech_samples = ds.sort('''id''' ).select(range(_A ) )[:_A]['''audio''']
    return [x["array"] for x in speech_samples]
def _lowercase ( self ):
    """Integration check: the first 30 extracted waveform values of one real
    LibriSpeech sample must match the hard-coded reference tensor."""
    # NOTE(review): mangled bindings — later lines read `input_speech`,
    # `feature_extractor`, `input_values`, `_A`, which are never bound by
    # these `UpperCAmelCase` assignments.
    UpperCAmelCase = torch.tensor(
        [2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
         3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
         2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
         4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
         7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
         4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
    # fmt: on
    UpperCAmelCase = self._load_datasamples(1 )
    UpperCAmelCase = SpeechTaFeatureExtractor()
    UpperCAmelCase = feature_extractor(_A , return_tensors='''pt''' ).input_values
    self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
    self.assertTrue(torch.allclose(input_values[0, :3_0] , _A , atol=1E-6 ) )
def _lowercase ( self ):
    """Integration check for target extraction: the first 30 mel-filterbank
    values of one real sample must match the hard-coded reference tensor."""
    # NOTE(review): mangled bindings — later lines read `input_speech`,
    # `feature_extractor`, `input_values`, `_A`, which are never bound by
    # these `UpperCAmelCase` assignments.
    UpperCAmelCase = torch.tensor(
        [-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
         -3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
         -3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
         -3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
    # fmt: on
    UpperCAmelCase = self._load_datasamples(1 )
    UpperCAmelCase = SpeechTaFeatureExtractor()
    UpperCAmelCase = feature_extractor(audio_target=_A , return_tensors='''pt''' ).input_values
    self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
    self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _A , atol=1E-4 ) )
| 273 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
# Module-level logger (NOTE(review): `logging` here is the transformers
# wrapper imported above, not the stdlib module).
__snake_case = logging.get_logger(__name__)
# Fix: all four constants below were bound to the single name `__snake_case`
# (each assignment clobbered the previous one), while the tokenizer class in
# this file reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_INIT_CONFIGURATION / PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES —
# all of which were undefined. Restore the canonical names.

# Resource files expected alongside a checkpoint.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Download locations of the vocab / serialized tokenizer for each checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum model input length (in tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 5_1_2,
    "squeezebert/squeezebert-mnli": 5_1_2,
    "squeezebert/squeezebert-mnli-headless": 5_1_2,
}

# Per-checkpoint tokenizer __init__ overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class __lowerCamelCase (a_ ):
    """Fast (tokenizers-backed) SqueezeBERT tokenizer wrapper.

    Mirrors the BERT fast-tokenizer wiring: special tokens, lowercasing and
    chinese-character handling are re-synced onto the backend normalizer.
    """
    # NOTE(review): the base class `a_` is undefined here, and `_A` inside
    # the method bodies is not bound in any scope — both look like scrambled
    # names (presumably PreTrainedTokenizerFast and the surrounding
    # parameters); confirm against the canonical file before running.
    _lowercase = VOCAB_FILES_NAMES
    _lowercase = PRETRAINED_VOCAB_FILES_MAP
    _lowercase = PRETRAINED_INIT_CONFIGURATION
    _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowercase = SqueezeBertTokenizer

    def __init__( self: int,A_: Any=None,A_: List[Any]=None,A_: Tuple=True,A_: Optional[int]="[UNK]",A_: Optional[int]="[SEP]",A_: int="[PAD]",A_: Union[str, Any]="[CLS]",A_: Tuple="[MASK]",A_: Tuple=True,A_: str=None,**A_: int,):
        """Build the fast tokenizer, then rebuild the backend normalizer when
        the requested lowercase/strip_accents/chinese-chars options differ
        from the serialized tokenizer state."""
        super().__init__(
            _A,tokenizer_file=_A,do_lower_case=_A,unk_token=_A,sep_token=_A,pad_token=_A,cls_token=_A,mask_token=_A,tokenize_chinese_chars=_A,strip_accents=_A,**_A,)
        __UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase',_A ) != do_lower_case
            or normalizer_state.get('strip_accents',_A ) != strip_accents
            or normalizer_state.get('handle_chinese_chars',_A ) != tokenize_chinese_chars
        ):
            # Recreate the normalizer with the caller's options.
            __UpperCamelCase = getattr(_A,normalizer_state.pop('type' ) )
            __UpperCamelCase = do_lower_case
            __UpperCamelCase = strip_accents
            __UpperCamelCase = tokenize_chinese_chars
            __UpperCamelCase = normalizer_class(**_A )
        __UpperCamelCase = do_lower_case

    def snake_case_ ( self: List[Any],A_: int,A_: int=None ):
        """Wrap one or two token-id sequences as [CLS] A [SEP] (B [SEP])."""
        __UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def snake_case_ ( self: Any,A_: Dict,A_: List[Any] = None ):
        """Token-type ids: 0 for the first segment (incl. specials), 1 for the second."""
        __UpperCamelCase = [self.sep_token_id]
        __UpperCamelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def snake_case_ ( self: Optional[int],A_: int,A_: Any = None ):
        """Persist the backend tokenizer model files to disk."""
        __UpperCamelCase = self._tokenizer.model.save(_A,name=_A )
        return tuple(_A )
| 310 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
# Fix: the module previously built the base mapping in `__A`, rebound `__A`
# to plain lists in each optional-backend branch (clobbering the dict), and
# finally passed the never-defined name `_import_structure` to _LazyModule.
# Restore the canonical lazy-import pattern: one `_import_structure` dict,
# extended per available backend, installed via sys.modules.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy module
    # below resolves attributes on demand.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends (torch/tf)
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
# Module-level logger for this file.
lowerCamelCase = logging.get_logger(__name__)

# NOTE(review): every constant below is bound to the same name
# `lowerCamelCase`, so each assignment clobbers the previous one; the
# decorators later in the file read `_CONFIG_FOR_DOC`, `_CHECKPOINT_FOR_DOC`,
# `_EXPECTED_OUTPUT_SHAPE`, etc., which are never defined here — the names
# look mangled and need restoring before this module can import.

# General docstring
lowerCamelCase = "ResNetConfig"
# Base docstring
lowerCamelCase = "microsoft/resnet-50"
lowerCamelCase = [1, 2048, 7, 7]
# Image classification docstring
lowerCamelCase = "microsoft/resnet-50"
lowerCamelCase = "tiger cat"
lowerCamelCase = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class _a ( nn.Module):
    """Conv -> BatchNorm -> activation block, the basic ResNet conv unit."""
    # NOTE(review): `nn.Convad`/`nn.BatchNormad` look like mangled
    # `nn.Conv2d`/`nn.BatchNorm2d`; `_A`, `kernel_size` and `activation` are
    # not bound in these scopes (scrambled parameter names).

    def __init__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] = 3 , _SCREAMING_SNAKE_CASE : str = 1 , _SCREAMING_SNAKE_CASE : Optional[int] = "relu" )-> List[Any]:
        super().__init__()
        # padding=kernel_size // 2 keeps spatial size for stride 1 ("same"-style).
        lowerCAmelCase__ : Any = nn.Convad(
            _A , _A , kernel_size=_A , stride=_A , padding=kernel_size // 2 , bias=_A )
        lowerCAmelCase__ : Tuple = nn.BatchNormad(_A )
        lowerCAmelCase__ : Any = ACTaFN[activation] if activation is not None else nn.Identity()

    def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : int )-> List[str]:
        """Apply convolution, then normalization, then activation."""
        lowerCAmelCase__ : Dict = self.convolution(_A )
        lowerCAmelCase__ : Dict = self.normalization(_A )
        lowerCAmelCase__ : List[Any] = self.activation(_A )
        return hidden_state
class _a ( nn.Module):
    """ResNet stem: a 7x7 stride-2 conv followed by 3x3 stride-2 max-pooling."""
    # NOTE(review): `_A` in the bodies is undefined (scrambled parameter name).

    def __init__( self : Dict , _SCREAMING_SNAKE_CASE : List[str] )-> int:
        super().__init__()
        lowerCAmelCase__ : Optional[int] = ResNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
        lowerCAmelCase__ : Union[str, Any] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
        # Remembered so forward() can validate the input's channel count.
        lowerCAmelCase__ : Union[str, Any] = config.num_channels

    def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : Optional[Any] )-> Any:
        """Embed pixel values; raises if channel count mismatches the config."""
        lowerCAmelCase__ : Dict = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
        lowerCAmelCase__ : List[str] = self.embedder(_A )
        lowerCAmelCase__ : List[str] = self.pooler(_A )
        return embedding
class _a ( nn.Module):
    """ResNet shortcut branch: 1x1 conv + batch-norm to project the residual
    to the right shape when channels or stride change."""
    # NOTE(review): `_A` in the bodies is undefined (scrambled parameter name).

    def __init__( self : Optional[int] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int = 2 )-> List[str]:
        super().__init__()
        lowerCAmelCase__ : Optional[Any] = nn.Convad(_A , _A , kernel_size=1 , stride=_A , bias=_A )
        lowerCAmelCase__ : List[str] = nn.BatchNormad(_A )

    def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : List[str] )-> Union[str, Any]:
        """Project the input through the 1x1 conv and normalize it."""
        lowerCAmelCase__ : Union[str, Any] = self.convolution(_A )
        lowerCAmelCase__ : Optional[int] = self.normalization(_A )
        return hidden_state
class _a ( nn.Module):
    """ResNet basic residual layer: two 3x3 convs plus a (possibly projected)
    shortcut, summed before the final activation."""
    # NOTE(review): `_A`, `in_channels`, `out_channels`, `stride`,
    # `activation` are not bound in these scopes (scrambled names).

    def __init__( self : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict = 1 , _SCREAMING_SNAKE_CASE : List[Any] = "relu" )-> Tuple:
        super().__init__()
        # The shortcut needs a projection only when shape changes.
        lowerCAmelCase__ : int = in_channels != out_channels or stride != 1
        lowerCAmelCase__ : Dict = (
            ResNetShortCut(_A , _A , stride=_A ) if should_apply_shortcut else nn.Identity()
        )
        lowerCAmelCase__ : str = nn.Sequential(
            ResNetConvLayer(_A , _A , stride=_A ) , ResNetConvLayer(_A , _A , activation=_A ) , )
        lowerCAmelCase__ : Optional[int] = ACTaFN[activation]

    def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Any )-> List[Any]:
        """Main branch + shortcut, then activation."""
        lowerCAmelCase__ : str = hidden_state
        lowerCAmelCase__ : Optional[Any] = self.layer(_A )
        lowerCAmelCase__ : Dict = self.shortcut(_A )
        hidden_state += residual
        lowerCAmelCase__ : Any = self.activation(_A )
        return hidden_state
class _a ( nn.Module):
    """ResNet bottleneck layer: 1x1 reduce -> 3x3 -> 1x1 expand, with a
    (possibly projected) shortcut summed before the final activation."""
    # NOTE(review): `_A`, `in_channels`, `out_channels`, `stride`,
    # `reduction`, `activation` are not bound in these scopes (scrambled names).

    def __init__( self : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[int] = 1 , _SCREAMING_SNAKE_CASE : Union[str, Any] = "relu" , _SCREAMING_SNAKE_CASE : Tuple = 4 )-> List[str]:
        super().__init__()
        lowerCAmelCase__ : Optional[int] = in_channels != out_channels or stride != 1
        # Bottleneck width = out_channels / reduction.
        lowerCAmelCase__ : List[Any] = out_channels // reduction
        lowerCAmelCase__ : Any = (
            ResNetShortCut(_A , _A , stride=_A ) if should_apply_shortcut else nn.Identity()
        )
        lowerCAmelCase__ : Optional[Any] = nn.Sequential(
            ResNetConvLayer(_A , _A , kernel_size=1 ) , ResNetConvLayer(_A , _A , stride=_A ) , ResNetConvLayer(_A , _A , kernel_size=1 , activation=_A ) , )
        lowerCAmelCase__ : Optional[Any] = ACTaFN[activation]

    def UpperCAmelCase__( self : Tuple , _SCREAMING_SNAKE_CASE : int )-> Dict:
        """Main branch + shortcut, then activation."""
        lowerCAmelCase__ : str = hidden_state
        lowerCAmelCase__ : List[Any] = self.layer(_A )
        lowerCAmelCase__ : Tuple = self.shortcut(_A )
        hidden_state += residual
        lowerCAmelCase__ : Optional[int] = self.activation(_A )
        return hidden_state
class _a ( nn.Module):
    """One ResNet stage: `depth` residual layers; the first layer carries the
    stride-2 downsampling, the rest keep the resolution."""
    # NOTE(review): `_A` and `depth` are not bound in these scopes
    # (scrambled parameter names).

    def __init__( self : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int = 2 , _SCREAMING_SNAKE_CASE : Dict = 2 , )-> int:
        super().__init__()
        # Layer flavor is chosen per config: bottleneck vs basic.
        lowerCAmelCase__ : Union[str, Any] = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
        lowerCAmelCase__ : Optional[int] = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(_A , _A , stride=_A , activation=config.hidden_act ) , *[layer(_A , _A , activation=config.hidden_act ) for _ in range(depth - 1 )] , )

    def UpperCAmelCase__( self : Optional[int] , _SCREAMING_SNAKE_CASE : Any )-> Tuple:
        """Run the input through every layer of the stage in order."""
        lowerCAmelCase__ : Any = input
        for layer in self.layers:
            lowerCAmelCase__ : Dict = layer(_A )
        return hidden_state
class _a ( nn.Module):
    """ResNet encoder: the full stack of stages, with optional collection of
    per-stage hidden states."""
    # NOTE(review): `_A`, `hidden_state` (as forward input) are not bound in
    # these scopes (scrambled parameter names).

    def __init__( self : Dict , _SCREAMING_SNAKE_CASE : List[str] )-> Optional[Any]:
        super().__init__()
        lowerCAmelCase__ : Optional[int] = nn.ModuleList([] )
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                _A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        # Remaining stages chain consecutive hidden sizes.
        lowerCAmelCase__ : List[str] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(_A , config.depths[1:] ):
            self.stages.append(ResNetStage(_A , _A , _A , depth=_A ) )

    def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple = False , _SCREAMING_SNAKE_CASE : Any = True )-> int:
        """Run all stages; optionally gather hidden states before/after each."""
        lowerCAmelCase__ : Dict = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                lowerCAmelCase__ : Union[str, Any] = hidden_states + (hidden_state,)
            lowerCAmelCase__ : List[str] = stage_module(_A )
        if output_hidden_states:
            lowerCAmelCase__ : Dict = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(
            last_hidden_state=_A , hidden_states=_A , )
class _a ( a_):
    """ResNet pre-trained base: weight init + gradient-checkpointing toggle."""
    # NOTE(review): base class `a_` and `_A` are undefined here (scrambled
    # names; presumably PreTrainedModel and method parameters).
    _a : List[Any] = ResNetConfig
    _a : Any = '''resnet'''
    _a : Dict = '''pixel_values'''
    _a : str = True

    def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : Tuple )-> Optional[Any]:
        """Kaiming init for convs, constant init for norm layers."""
        if isinstance(_A , nn.Convad ):
            nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
        elif isinstance(_A , (nn.BatchNormad, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )

    def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str=False )-> Optional[Any]:
        """Enable/disable gradient checkpointing on encoder modules."""
        if isinstance(_A , _A ):
            lowerCAmelCase__ : Tuple = value
lowerCamelCase = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowerCamelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    '''The bare ResNet model outputting raw features without any specific head on top.''' , a_ , )
class _a ( a_):
    """Bare ResNet: embeddings -> encoder -> adaptive average pooling."""
    # NOTE(review): `a_`, `_A`, `_CHECKPOINT_FOR_DOC`, `_CONFIG_FOR_DOC`,
    # `_EXPECTED_OUTPUT_SHAPE` are undefined here (scrambled/clobbered names).

    def __init__( self : str , _SCREAMING_SNAKE_CASE : List[Any] )-> Any:
        super().__init__(_A )
        lowerCAmelCase__ : Optional[Any] = config
        lowerCAmelCase__ : Optional[Any] = ResNetEmbeddings(_A )
        lowerCAmelCase__ : Optional[int] = ResNetEncoder(_A )
        # Global (1, 1) pooling over the final feature map.
        lowerCAmelCase__ : List[Any] = nn.AdaptiveAvgPoolad((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(_A )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def UpperCAmelCase__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any = None , _SCREAMING_SNAKE_CASE : Dict = None )-> List[Any]:
        """Embed, encode and pool; returns a tuple or a model-output object."""
        lowerCAmelCase__ : str = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowerCAmelCase__ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
        lowerCAmelCase__ : str = self.embedder(_A )
        lowerCAmelCase__ : int = self.encoder(
            _A , output_hidden_states=_A , return_dict=_A )
        lowerCAmelCase__ : Dict = encoder_outputs[0]
        lowerCAmelCase__ : Dict = self.pooler(_A )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=_A , pooler_output=_A , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    '''
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , a_ , )
class _a ( a_):
    """ResNet + linear classification head; infers problem_type from labels."""
    # NOTE(review): `a_`, `_A`, `_IMAGE_CLASS_CHECKPOINT`, `_CONFIG_FOR_DOC`,
    # `_IMAGE_CLASS_EXPECTED_OUTPUT`, `labels`, `loss`, `output` are
    # undefined here (scrambled/clobbered names).

    def __init__( self : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] )-> str:
        super().__init__(_A )
        lowerCAmelCase__ : List[Any] = config.num_labels
        lowerCAmelCase__ : List[str] = ResNetModel(_A )
        # classification head
        lowerCAmelCase__ : Any = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(_A )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : List[str] = None , _SCREAMING_SNAKE_CASE : Dict = None , )-> Optional[Any]:
        """Classify pooled features; picks the loss from config.problem_type,
        inferring it from label dtype/num_labels when unset."""
        lowerCAmelCase__ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
        lowerCAmelCase__ : Any = self.resnet(_A , output_hidden_states=_A , return_dict=_A )
        lowerCAmelCase__ : Tuple = outputs.pooler_output if return_dict else outputs[1]
        lowerCAmelCase__ : List[Any] = self.classifier(_A )
        lowerCAmelCase__ : List[Any] = None
        if labels is not None:
            # Infer the problem type once, then cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    lowerCAmelCase__ : List[str] = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    lowerCAmelCase__ : Optional[int] = '''single_label_classification'''
                else:
                    lowerCAmelCase__ : Any = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                lowerCAmelCase__ : Any = MSELoss()
                if self.num_labels == 1:
                    lowerCAmelCase__ : Dict = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    lowerCAmelCase__ : Optional[int] = loss_fct(_A , _A )
            elif self.config.problem_type == "single_label_classification":
                lowerCAmelCase__ : Optional[Any] = CrossEntropyLoss()
                lowerCAmelCase__ : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                lowerCAmelCase__ : str = BCEWithLogitsLoss()
                lowerCAmelCase__ : Union[str, Any] = loss_fct(_A , _A )
        if not return_dict:
            lowerCAmelCase__ : Tuple = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
@add_start_docstrings(
    '''
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    ''' , a_ , )
class _a ( a_ , a_):
    """ResNet backbone: exposes the feature maps of the configured out_features
    stages for downstream detection/segmentation frameworks."""
    # NOTE(review): both bases are the undefined name `a_` (canonically
    # ResNetPreTrainedModel + BackboneMixin); `_A` and `_CONFIG_FOR_DOC`
    # are also undefined here (scrambled/clobbered names).

    def __init__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] )-> Optional[int]:
        super().__init__(_A )
        super()._init_backbone(_A )
        # Feature dims per stage: stem embedding size followed by stage widths.
        lowerCAmelCase__ : List[Any] = [config.embedding_size] + config.hidden_sizes
        lowerCAmelCase__ : List[str] = ResNetEmbeddings(_A )
        lowerCAmelCase__ : List[str] = ResNetEncoder(_A )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(_A )
    @replace_return_docstrings(output_type=_A , config_class=_CONFIG_FOR_DOC )
    def UpperCAmelCase__( self : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] = None , _SCREAMING_SNAKE_CASE : Tuple = None )-> str:
        """Encode and collect only the hidden states of the requested stages."""
        lowerCAmelCase__ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
        lowerCAmelCase__ : Dict = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowerCAmelCase__ : List[str] = self.embedder(_A )
        lowerCAmelCase__ : str = self.encoder(_A , output_hidden_states=_A , return_dict=_A )
        lowerCAmelCase__ : Dict = outputs.hidden_states
        lowerCAmelCase__ : List[Any] = ()
        for idx, stage in enumerate(self.stage_names ):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            lowerCAmelCase__ : Union[str, Any] = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=_A , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=_A , )
| 131 |
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> list[int]:
    """Return the first ``UpperCamelCase__`` hexagonal numbers h(n) = n * (2n - 1).

    The sequence starts at n = 0: [0, 1, 6, 15, 28, ...].

    Args:
        UpperCamelCase__: how many terms to generate; must be a positive int.

    Raises:
        ValueError: if the argument is not a positive integer.
    """
    # Fix: the original tested `isinstance(x, x)` (always a TypeError for a
    # valid int argument) and compared before type-checking, so non-numeric
    # input raised TypeError instead of the documented ValueError.
    # Validate the type first, then the range.
    if not isinstance(UpperCamelCase__ , int ) or UpperCamelCase__ <= 0:
        raise ValueError("Length must be a positive integer." )
    return [n * (2 * n - 1) for n in range(UpperCamelCase__ )]


# Public alias: the __main__ block and external callers use the conventional
# name, and `from module import *` skips dunder-prefixed names.
hexagonal_numbers = __SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Fix: the original called `hexagonal_numbers(length=5)`, but no function
    # of that name (or with a `length` parameter) is defined in this module;
    # call the actual function positionally.
    print(__SCREAMING_SNAKE_CASE(5))
    print(__SCREAMING_SNAKE_CASE(10))
| 273 | 0 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
# Module-level logger for this head-pruning script (stdlib logging).
_UpperCamelCase = logging.getLogger(__name__)
def lowerCAmelCase__( lowercase : List[str] , lowercase : Optional[Any] ) -> Optional[int]:
    """Save `model` into a directory, first deleting any stale config/weights.

    NOTE(review): both parameters are named `lowercase` (the second shadows
    the first), the body reads `model` and `UpperCamelCase__`, neither of
    which is bound here — the parameter names look mangled (presumably
    (model, dirpath)); restore before use.
    """
    if os.path.exists(UpperCamelCase__ ):
        # Remove a previous save so stale files never mix with the new one.
        if os.path.exists(os.path.join(UpperCamelCase__ , "config.json" ) ) and os.path.isfile(
            os.path.join(UpperCamelCase__ , "config.json" ) ):
            os.remove(os.path.join(UpperCamelCase__ , "config.json" ) )
        if os.path.exists(os.path.join(UpperCamelCase__ , "pytorch_model.bin" ) ) and os.path.isfile(
            os.path.join(UpperCamelCase__ , "pytorch_model.bin" ) ):
            os.remove(os.path.join(UpperCamelCase__ , "pytorch_model.bin" ) )
    else:
        os.makedirs(UpperCamelCase__ )
    model.save_pretrained(UpperCamelCase__ )
def lowerCAmelCase__( lowercase : Optional[Any] , lowercase : str=False ) -> Any:
    """Entropy of a distribution along the last dim: -sum(p * log p).

    NOTE(review): heavily mangled — both parameters share the name
    `lowercase`, every local rebinds `__snake_case`,
    `torch.pow(UpperCamelCase__, UpperCamelCase__)` raises x to itself, and
    the bare `= 0` line appears to have lost an original `plogp[p == 0] = 0`
    masking; reconstruct from the canonical script before use.
    """
    __snake_case : str = 2
    if unlogit:
        __snake_case : List[Any] = torch.pow(UpperCamelCase__ , UpperCamelCase__ )
    __snake_case : str = p * torch.log(UpperCamelCase__ )
    __snake_case : Union[str, Any] = 0
    return -plogp.sum(dim=-1 )
def lowerCAmelCase__( lowercase : Tuple ) -> Union[str, Any]:
    """Log a 2-D (layers x heads) tensor as a tab-separated table, formatting
    floats with 5 decimals and integer tensors with %d.

    NOTE(review): the body reads `UpperCamelCase__` and `tensor`, neither of
    which is bound — the parameter name looks mangled (presumably `tensor`).
    """
    # Header row: one column index per head.
    logger.info("lv, h >\t" + "\t".join(f"""{x + 1}""" for x in range(len(UpperCamelCase__ ) ) ) )
    for row in range(len(UpperCamelCase__ ) ):
        if tensor.dtype != torch.long:
            logger.info(f"""layer {row + 1}:\t""" + "\t".join(f"""{x:.5f}""" for x in tensor[row].cpu().data ) )
        else:
            logger.info(f"""layer {row + 1}:\t""" + "\t".join(f"""{x:d}""" for x in tensor[row].cpu().data ) )
def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : List[Any]=True , lowercase : Any=True , lowercase : str=None , lowercase : Union[str, Any]=False ) -> Any:
    """Run the model over a dataloader and accumulate per-head attention
    entropy and head-importance scores (gradient of the loss w.r.t. a head
    mask), then log them and return (attn_entropy, head_importance, loss).

    NOTE(review): all seven parameters share the name `lowercase` and the
    body reads `args`, `model`, `head_mask`, `compute_entropy`,
    `compute_importance`, `actually_pruned`, `UpperCamelCase__`, none of
    which is bound here — the original signature was presumably
    (args, model, eval_dataloader, compute_entropy=True,
    compute_importance=True, head_mask=None, actually_pruned=False).
    Every local also rebinds `__snake_case`; restore bindings before use.
    """
    __snake_case , __snake_case : List[Any] = model.config.num_hidden_layers, model.config.num_attention_heads
    __snake_case : List[str] = torch.zeros(UpperCamelCase__ , UpperCamelCase__ ).to(args.device )
    __snake_case : Optional[Any] = torch.zeros(UpperCamelCase__ , UpperCamelCase__ ).to(args.device )
    if head_mask is None:
        # Start from an all-ones mask and track its gradient for importance.
        __snake_case : List[str] = torch.ones(UpperCamelCase__ , UpperCamelCase__ ).to(args.device )
        head_mask.requires_grad_(requires_grad=UpperCamelCase__ )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        __snake_case : Union[str, Any] = None
    __snake_case : Optional[int] = 0.0
    __snake_case : List[Any] = 0.0
    for step, inputs in enumerate(tqdm(UpperCamelCase__ , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
        __snake_case : Dict = tuple(t.to(args.device ) for t in inputs )
        ((__snake_case ) , ) : List[Any] = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        __snake_case : Optional[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ , head_mask=UpperCamelCase__ )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        __snake_case , __snake_case , __snake_case : List[Any] = (
            outputs[0],
            outputs[1],
            outputs[-1],
        ) # Loss and logits are the first, attention the last
        loss.backward() # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(UpperCamelCase__ ):
                __snake_case : Any = entropy(attn.detach() , UpperCamelCase__ )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(UpperCamelCase__ ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        __snake_case : List[str] = 2
        __snake_case : Tuple = torch.pow(torch.pow(UpperCamelCase__ , UpperCamelCase__ ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
    if not args.dont_normalize_global_importance:
        # Min-max normalize to [0, 1] across all heads.
        __snake_case : str = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies" )
        print_ad_tensor(UpperCamelCase__ )
    if compute_importance:
        logger.info("Head importance scores" )
        print_ad_tensor(UpperCamelCase__ )
    logger.info("Head ranked by importance scores" )
    __snake_case : Optional[int] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    __snake_case : Tuple = torch.arange(
        head_importance.numel() , device=args.device )
    __snake_case : List[str] = head_ranks.view_as(UpperCamelCase__ )
    print_ad_tensor(UpperCamelCase__ )
    return attn_entropy, head_importance, total_loss
def lowerCAmelCase__( lowercase : str , lowercase : Any , lowercase : Any ) -> Optional[Any]:
    """Iteratively mask the least-important attention heads.

    Starting from an all-ones head mask, repeatedly zeroes the
    ``args.masking_amount`` fraction of remaining heads with the lowest
    importance scores, recomputing importance after each step, until the
    score (1 / LM loss) drops below ``args.masking_threshold`` times the
    original score.  Saves the final mask to ``head_mask.npy`` in
    ``args.output_dir`` and returns it.

    NOTE(review): local names appear machine-mangled (every assignment
    targets ``__snake_case`` and call sites pass ``UpperCamelCase__``), so
    reads such as ``loss``/``original_score``/``new_head_mask`` are
    unresolved as written — restore the original names before running.
    """
    __snake_case , __snake_case , __snake_case : Union[str, Any] = compute_heads_importance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ )
    # Higher is better: use the inverse LM loss as the score to preserve.
    __snake_case : Optional[Any] = 1 / loss # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f" , UpperCamelCase__ , original_score * args.masking_threshold )
    __snake_case : Optional[Any] = torch.ones_like(UpperCamelCase__ )
    # Number of heads to mask per iteration (at least one).
    __snake_case : Optional[int] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    __snake_case : Any = original_score
    while current_score >= original_score * args.masking_threshold:
        __snake_case : Optional[int] = new_head_mask.clone().detach() # save current head mask
        # heads from least important to most - keep only not-masked heads
        __snake_case : Union[str, Any] = float("Inf" )
        __snake_case : List[Any] = head_importance.view(-1 ).sort()[1]
        if len(UpperCamelCase__ ) <= num_to_mask:
            print("BREAK BY num_to_mask" )
            break
        # mask heads
        __snake_case : Dict = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
        __snake_case : List[str] = new_head_mask.view(-1 )
        __snake_case : Any = 0.0
        __snake_case : Tuple = new_head_mask.view_as(UpperCamelCase__ )
        __snake_case : Union[str, Any] = new_head_mask.clone().detach()
        print_ad_tensor(UpperCamelCase__ )
        # Compute metric and head importance again
        __snake_case , __snake_case , __snake_case : List[Any] = compute_heads_importance(
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , head_mask=UpperCamelCase__ )
        __snake_case : Optional[int] = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)" , UpperCamelCase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info("Final head mask" )
    print_ad_tensor(UpperCamelCase__ )
    np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
    return head_mask
def lowerCAmelCase__( lowercase : List[Any] , lowercase : Tuple , lowercase : List[Any] , lowercase : Dict ) -> Optional[int]:
    """Physically prune the heads zeroed by the head mask and measure the effect.

    Times a scoring pass with masking, prunes the masked heads from the
    model, times a second pass on the pruned model, then logs parameter
    counts, scores and the speed ratio, and saves the pruned model to
    ``args.output_dir``.

    NOTE(review): local names appear machine-mangled (assignments target
    ``__snake_case``; call sites pass ``UpperCamelCase__``), so reads such
    as ``loss``/``head_mask``/``before_time`` are unresolved as written.
    """
    __snake_case : List[str] = datetime.now()
    __snake_case , __snake_case , __snake_case : List[Any] = compute_heads_importance(
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , compute_importance=UpperCamelCase__ , head_mask=UpperCamelCase__ )
    __snake_case : Tuple = 1 / loss
    __snake_case : Optional[int] = datetime.now() - before_time
    __snake_case : Any = sum(p.numel() for p in model.parameters() )
    # Map each layer to the list of head indices that are masked out (0 in the mask).
    __snake_case : int = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCamelCase__ ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            # A single head index comes back as a bare int; wrap it in a list.
            __snake_case : List[Any] = [
                v,
            ]
    assert sum(len(UpperCamelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(UpperCamelCase__ )
    __snake_case : Optional[int] = sum(p.numel() for p in model.parameters() )
    __snake_case : Optional[Any] = datetime.now()
    __snake_case , __snake_case , __snake_case : List[str] = compute_heads_importance(
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , compute_importance=UpperCamelCase__ , head_mask=UpperCamelCase__ , actually_pruned=UpperCamelCase__ , )
    __snake_case : Any = 1 / loss
    __snake_case : List[str] = datetime.now() - before_time
    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , UpperCamelCase__ , UpperCamelCase__ , pruned_num_params / original_num_params * 100 , )
    logger.info("Pruning: score with masking: %f score with pruning: %f" , UpperCamelCase__ , UpperCamelCase__ )
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
    save_model(UpperCamelCase__ , args.output_dir )
def lowerCAmelCase__( ) -> Union[str, Any]:
    """Entry point: parse CLI arguments, set up device/model/data, then run
    head-importance analysis and (optionally) head masking and pruning.

    NOTE(review): local names appear machine-mangled (assignments target
    ``__snake_case``; call sites pass ``UpperCamelCase__``), so reads such
    as ``args``/``model``/``eval_dataloader`` are unresolved as written.
    """
    __snake_case : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
    parser.add_argument(
        "--model_name_or_path" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--output_dir" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The output directory where the model predictions and checkpoints will be written." , )
    # Other parameters
    parser.add_argument(
        "--config_name" , default="" , type=UpperCamelCase__ , help="Pretrained config name or path if not the same as model_name_or_path" , )
    parser.add_argument(
        "--tokenizer_name" , default="" , type=UpperCamelCase__ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
    parser.add_argument(
        "--cache_dir" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Where do you want to store the pre-trained models downloaded from s3" , )
    parser.add_argument(
        "--data_subset" , type=UpperCamelCase__ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
    parser.add_argument(
        "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
    parser.add_argument(
        "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
    parser.add_argument(
        "--dont_normalize_importance_by_layer" , action="store_true" , help="Don\'t normalize importance score by layers" )
    parser.add_argument(
        "--dont_normalize_global_importance" , action="store_true" , help="Don\'t normalize all importance scores between 0 and 1" , )
    parser.add_argument(
        "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
    parser.add_argument(
        "--masking_threshold" , default=0.9 , type=UpperCamelCase__ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
    parser.add_argument(
        "--masking_amount" , default=0.1 , type=UpperCamelCase__ , help="Amount to heads to masking at each masking step." )
    parser.add_argument("--metric_name" , default="acc" , type=UpperCamelCase__ , help="Metric to use for head masking." )
    parser.add_argument(
        "--max_seq_length" , default=128 , type=UpperCamelCase__ , help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ) , )
    parser.add_argument("--batch_size" , default=1 , type=UpperCamelCase__ , help="Batch size." )
    parser.add_argument("--seed" , type=UpperCamelCase__ , default=42 )
    parser.add_argument("--local_rank" , type=UpperCamelCase__ , default=-1 , help="local_rank for distributed training on gpus" )
    parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
    parser.add_argument("--server_ip" , type=UpperCamelCase__ , default="" , help="Can be used for distant debugging." )
    parser.add_argument("--server_port" , type=UpperCamelCase__ , default="" , help="Can be used for distant debugging." )
    __snake_case : int = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCamelCase__ )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        __snake_case : Optional[int] = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
        __snake_case : str = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        __snake_case : List[Any] = torch.device("cuda" , args.local_rank )
        __snake_case : List[str] = 1
        torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    __snake_case : Tuple = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        __snake_case : Optional[int] = nn.parallel.DistributedDataParallel(
            UpperCamelCase__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCamelCase__ )
    elif args.n_gpu > 1:
        __snake_case : List[Any] = nn.DataParallel(UpperCamelCase__ )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=UpperCamelCase__ )
    torch.save(UpperCamelCase__ , os.path.join(args.output_dir , "run_args.bin" ) )
    logger.info("Training/evaluation parameters %s" , UpperCamelCase__ )
    # Prepare dataset
    __snake_case : Optional[int] = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.intaa ),
        ] )
    __snake_case : Tuple = (torch.from_numpy(UpperCamelCase__ ),)
    __snake_case : Optional[int] = TensorDataset(*UpperCamelCase__ )
    __snake_case : List[str] = RandomSampler(UpperCamelCase__ )
    __snake_case : int = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        __snake_case : List[str] = mask_heads(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        prune_heads(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Script entry point.
if __name__ == "__main__":
    main()
| 326 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.g4dn.xlarge''',
            '''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
        },
        {
            '''framework''': '''tensorflow''',
            '''script''': '''run_tf.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.g4dn.xlarge''',
            '''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
        },
    ] )
class A_ (unittest.TestCase ):
    # Single-node SageMaker training integration test, parameterized over the
    # PyTorch and TensorFlow example scripts.
    # NOTE(review): every method is named ``_lowercase`` (mangled), so later
    # definitions shadow earlier ones, and several bodies read undefined
    # names (``_A``, ``job_name``) — restore the original names before use.
    def _lowercase ( self ):
        """Copy the example training script into the test path (PyTorch only)."""
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
        assert hasattr(self , '''env''' )
    def _lowercase ( self , _A=1 ):
        """Build a HuggingFace SageMaker estimator configured for ``_A`` instances."""
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
    def _lowercase ( self , _A ):
        """Export the metrics of training job ``_A`` to a CSV in the test path."""
        TrainingJobAnalytics(_A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
    def _lowercase ( self ):
        """Run a single-node training job and assert runtime/accuracy/loss KPIs."""
        UpperCAmelCase = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        UpperCAmelCase = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 273 | 0 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
__A = (720, 1280) # Height, Width
__A = (0.4, 0.6) # if height or width lower than this scale, drop it.
__A = 1 / 100
__A = ""
__A = ""
__A = ""
__A = 250
def a__ ( ) -> None:
    """Generate ``NUMBER_IMAGES`` mosaic images plus YOLO-format label files.

    For each output image: sample four source images, compose them into one
    mosaic, write the JPEG, then convert the remapped corner boxes back to
    YOLO (class, x_center, y_center, width, height) lines.

    NOTE(review): names are machine-mangled — locals are bound to
    ``__lowerCAmelCase`` and the helpers are all defined as ``a__`` — so
    reads such as ``get_dataset``/``path``/``new_annos`` do not resolve.
    """
    __lowerCAmelCase , __lowerCAmelCase: Dict = get_dataset(UpperCamelCase__ , UpperCamelCase__ )
    for index in range(UpperCamelCase__ ):
        __lowerCAmelCase: List[str] = random.sample(range(len(UpperCamelCase__ ) ) , 4 )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: List[str] = update_image_and_anno(
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , filter_scale=UpperCamelCase__ , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        __lowerCAmelCase: str = random_chars(3_2 )
        __lowerCAmelCase: Tuple = path.split(os.sep )[-1].rsplit("." , 1 )[0]
        __lowerCAmelCase: Any = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cva.imwrite(F"{file_root}.jpg" , UpperCamelCase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
        print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
        __lowerCAmelCase: Optional[Any] = []
        for anno in new_annos:
            # Convert corner coordinates back to YOLO (center, width, height).
            __lowerCAmelCase: List[str] = anno[3] - anno[1]
            __lowerCAmelCase: str = anno[4] - anno[2]
            __lowerCAmelCase: List[Any] = anno[1] + width / 2
            __lowerCAmelCase: int = anno[2] + height / 2
            __lowerCAmelCase: Tuple = F"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(UpperCamelCase__ )
        with open(F"{file_root}.txt" , "w" ) as outfile:
            outfile.write("\n".join(line for line in annos_list ) )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> tuple[list, list]:
    """Load image paths and their YOLO annotations, converted to corner boxes.

    Returns parallel lists: image paths and, per image, a list of
    ``[class_id, xmin, ymin, xmax, ymax]`` entries (fractional coords).
    Images whose label file yields no boxes are skipped.

    NOTE(review): the signature declares two parameters with the same
    mangled name, which is a SyntaxError in Python — presumably the
    original parameters were (label_dir, img_dir).  Locals are likewise
    mangled to ``__lowerCAmelCase``, so reads such as ``label_name``/
    ``obj_lists``/``boxes`` do not resolve as written.
    """
    __lowerCAmelCase: Union[str, Any] = []
    __lowerCAmelCase: Union[str, Any] = []
    for label_file in glob.glob(os.path.join(UpperCamelCase__ , "*.txt" ) ):
        __lowerCAmelCase: Optional[int] = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
        with open(UpperCamelCase__ ) as in_file:
            __lowerCAmelCase: str = in_file.readlines()
        __lowerCAmelCase: List[str] = os.path.join(UpperCamelCase__ , F"{label_name}.jpg" )
        __lowerCAmelCase: Optional[Any] = []
        for obj_list in obj_lists:
            __lowerCAmelCase: str = obj_list.rstrip("\n" ).split(" " )
            # Convert YOLO (center, w, h) fractions to corner coordinates.
            __lowerCAmelCase: str = float(obj[1] ) - float(obj[3] ) / 2
            __lowerCAmelCase: Optional[int] = float(obj[2] ) - float(obj[4] ) / 2
            __lowerCAmelCase: Optional[int] = float(obj[1] ) + float(obj[3] ) / 2
            __lowerCAmelCase: List[str] = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(UpperCamelCase__ )
        labels.append(UpperCamelCase__ )
    return img_paths, labels
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.0 , ) -> tuple[list, list, str]:
    """Compose four images into one mosaic and remap their annotations.

    Splits the output canvas at a random point inside the scale range,
    resizes one source image into each quadrant, rescales every bounding
    box into the mosaic's coordinate frame, and optionally drops boxes
    smaller than the filter scale.  Returns (mosaic image, remapped
    annotations, path of the first source image).

    NOTE(review): the signature declares six parameters with the same
    mangled name (a SyntaxError in Python) — presumably the originals were
    (all_img_list, all_annos, idxs, output_size, scale_range,
    filter_scale).  Locals are bound to ``__lowerCAmelCase``, so reads
    such as ``scale_x``/``img_annos``/``new_anno`` do not resolve as
    written.
    """
    __lowerCAmelCase: int = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
    # Random split point of the canvas, inside the configured scale range.
    __lowerCAmelCase: Tuple = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    __lowerCAmelCase: Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    __lowerCAmelCase: Dict = int(scale_x * output_size[1] )
    __lowerCAmelCase: str = int(scale_y * output_size[0] )
    __lowerCAmelCase: str = []
    __lowerCAmelCase: List[str] = []
    for i, index in enumerate(UpperCamelCase__ ):
        __lowerCAmelCase: Tuple = all_img_list[index]
        path_list.append(UpperCamelCase__ )
        __lowerCAmelCase: Any = all_annos[index]
        __lowerCAmelCase: str = cva.imread(UpperCamelCase__ )
        if i == 0: # top-left
            __lowerCAmelCase: Any = cva.resize(UpperCamelCase__ , (divid_point_x, divid_point_y) )
            __lowerCAmelCase: int = img
            for bbox in img_annos:
                __lowerCAmelCase: Dict = bbox[1] * scale_x
                __lowerCAmelCase: Any = bbox[2] * scale_y
                __lowerCAmelCase: Dict = bbox[3] * scale_x
                __lowerCAmelCase: Any = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1: # top-right
            __lowerCAmelCase: Dict = cva.resize(UpperCamelCase__ , (output_size[1] - divid_point_x, divid_point_y) )
            __lowerCAmelCase: int = img
            for bbox in img_annos:
                __lowerCAmelCase: Tuple = scale_x + bbox[1] * (1 - scale_x)
                __lowerCAmelCase: Tuple = bbox[2] * scale_y
                __lowerCAmelCase: Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
                __lowerCAmelCase: Any = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2: # bottom-left
            __lowerCAmelCase: int = cva.resize(UpperCamelCase__ , (divid_point_x, output_size[0] - divid_point_y) )
            __lowerCAmelCase: str = img
            for bbox in img_annos:
                __lowerCAmelCase: List[Any] = bbox[1] * scale_x
                __lowerCAmelCase: Tuple = scale_y + bbox[2] * (1 - scale_y)
                __lowerCAmelCase: Union[str, Any] = bbox[3] * scale_x
                __lowerCAmelCase: Any = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else: # bottom-right
            __lowerCAmelCase: List[Any] = cva.resize(
                UpperCamelCase__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            __lowerCAmelCase: Optional[int] = img
            for bbox in img_annos:
                __lowerCAmelCase: int = scale_x + bbox[1] * (1 - scale_x)
                __lowerCAmelCase: Tuple = scale_y + bbox[2] * (1 - scale_y)
                __lowerCAmelCase: Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
                __lowerCAmelCase: int = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        __lowerCAmelCase: Optional[Any] = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def a__ ( __SCREAMING_SNAKE_CASE ) -> str:
    """Return a random string of lowercase letters and digits.

    Parameters
    ----------
    __SCREAMING_SNAKE_CASE : int
        Desired length of the generated string; must be greater than 1.

    Returns
    -------
    str
        Random string of exactly the requested length.
    """
    # Bug fix: the original body referenced the undefined names
    # ``number_char`` / ``UpperCamelCase__`` instead of the function's own
    # parameter and local, raising NameError at call time.
    assert __SCREAMING_SNAKE_CASE > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 217 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and the map of canonical BigBird checkpoints to their hosted
# config files.
# NOTE(review): both names were mangled to ``__A`` (the second assignment
# shadows the first) — presumably ``logger`` and
# ``BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP`` originally.
__A : int = logging.get_logger(__name__)
__A : Tuple = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ (a_ ):
    """Configuration class for BigBird models (model type ``big_bird``).

    Stores model hyperparameters (vocab/hidden sizes, layer counts) and the
    sparse-attention settings (``attention_type``, ``block_size``,
    ``num_random_blocks``), forwarding the special token ids to the base
    pretrained-config class.

    NOTE(review): the ``__init__`` signature collapses every parameter to
    the mangled name ``_A`` (a duplicate-parameter SyntaxError in Python)
    and the body binds each value to the bare name ``UpperCAmelCase``
    instead of ``self.<attribute>`` — restore the original parameter and
    attribute names before use.
    """
    UpperCAmelCase__ = '''big_bird'''
    def __init__( self , _A=5_0_3_5_8 , _A=7_6_8 , _A=1_2 , _A=1_2 , _A=3_0_7_2 , _A="gelu_new" , _A=0.1 , _A=0.1 , _A=4_0_9_6 , _A=2 , _A=0.02 , _A=1E-12 , _A=True , _A=0 , _A=1 , _A=2 , _A=6_6 , _A="block_sparse" , _A=True , _A=False , _A=6_4 , _A=3 , _A=None , **_A , ):
        """Store the configuration values and forward token ids to the base class."""
        super().__init__(
            pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , sep_token_id=_A , **_A , )
        UpperCAmelCase = vocab_size
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = initializer_range
        UpperCAmelCase = type_vocab_size
        UpperCAmelCase = layer_norm_eps
        UpperCAmelCase = use_cache
        UpperCAmelCase = rescale_embeddings
        UpperCAmelCase = attention_type
        UpperCAmelCase = use_bias
        UpperCAmelCase = block_size
        UpperCAmelCase = num_random_blocks
        UpperCAmelCase = classifier_dropout
class A_ (a_ ):
    """ONNX export configuration for BigBird models."""

    @property
    def _lowercase ( self ):
        """Return the mapping of model input names to their dynamic axes.

        Multiple-choice tasks carry an extra ``choice`` axis between the
        batch and sequence dimensions.
        """
        # Bug fix: the axis mapping was bound to a throwaway mangled name
        # (``UpperCAmelCase``) while the undefined ``dynamic_axis`` was
        # returned, raising NameError — bind it to the name that is read.
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 273 | 0 |
import cva
import numpy as np
class __magic_name__ :
    """Harris corner detector.

    The detection method returns the input image with detected corners
    painted red together with a list of ``[x, y, response]`` entries.

    NOTE(review): names are machine-mangled — every local assignment
    targets ``A_`` and call arguments are ``_A`` — so reads such as
    ``k``/``img``/``ixx``/``wxx`` do not resolve as written; ``cva`` is
    presumably the mangled ``cv2`` import.
    """
    def __init__( self :int , snake_case :int , snake_case :List[Any] ):
        """Store the Harris free parameter ``k`` (must be 0.04 or 0.06) and the window size."""
        if k in (0.04, 0.06):
            A_ : int = k
            A_ : Tuple = window_size
        else:
            raise ValueError("invalid k value" )
    def __str__( self :int ):
        """Return the ``k`` parameter as a string."""
        return str(self.k )
    def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict ):
        """Run Harris corner detection on the grayscale image at the given path."""
        A_ : Tuple = cva.imread(_A , 0 )
        A_ , A_ : int = img.shape
        A_ : Dict = []
        A_ : str = img.copy()
        A_ : Any = cva.cvtColor(_A , cva.COLOR_GRAY2RGB )
        # Image gradients and their products form the structure tensor.
        A_ , A_ : Optional[Any] = np.gradient(_A )
        A_ : List[Any] = dx**2
        A_ : Optional[Any] = dy**2
        A_ : Union[str, Any] = dx * dy
        A_ : List[Any] = 0.04
        A_ : Optional[int] = self.window_size // 2
        for y in range(_A , h - offset ):
            for x in range(_A , w - offset ):
                # Sum the structure-tensor entries over the local window.
                A_ : str = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                A_ : int = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                A_ : Dict = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                # Harris response: det(M) - k * trace(M)^2
                A_ : List[str] = (wxx * wyy) - (wxy**2)
                A_ : List[Any] = wxx + wyy
                A_ : Any = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
_lowerCAmelCase : Tuple = HarrisCorner(0.04, 3)
_lowerCAmelCase : List[Any] = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
| 300 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
    """Helper that builds small ViT configs and inputs for the TF model tests.

    NOTE(review): names are machine-mangled — ``__init__`` and the check
    methods collapse their parameters to ``_A`` (a duplicate-parameter
    SyntaxError in Python), every method is named ``_lowercase`` (later
    definitions shadow earlier ones), and locals are bound to
    ``UpperCAmelCase`` — restore the original names before running.
    """
    def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , ):
        """Store the test hyperparameters and derive the expected sequence length."""
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = image_size
        UpperCAmelCase = patch_size
        UpperCAmelCase = num_channels
        UpperCAmelCase = is_training
        UpperCAmelCase = use_labels
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = type_sequence_label_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        UpperCAmelCase = (image_size // patch_size) ** 2
        UpperCAmelCase = num_patches + 1
    def _lowercase ( self ):
        """Create random pixel values (and labels) plus a config."""
        UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCAmelCase = self.get_config()
        return config, pixel_values, labels
    def _lowercase ( self ):
        """Build a ViTConfig from the stored hyperparameters."""
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , )
    def _lowercase ( self , _A , _A , _A ):
        """Check TFViTModel output shapes, including interpolated position encodings."""
        UpperCAmelCase = TFViTModel(config=_A )
        UpperCAmelCase = model(_A , training=_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image with different size than the one specified in config.
        UpperCAmelCase = self.image_size // 2
        UpperCAmelCase = pixel_values[:, :, :image_size, :image_size]
        UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A )
        UpperCAmelCase = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
    def _lowercase ( self , _A , _A , _A ):
        """Check TFViTForImageClassification logits shapes (incl. greyscale input)."""
        UpperCAmelCase = self.type_sequence_label_size
        UpperCAmelCase = TFViTForImageClassification(_A )
        UpperCAmelCase = model(_A , labels=_A , training=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image with different size than the one specified in config.
        UpperCAmelCase = self.image_size // 2
        UpperCAmelCase = pixel_values[:, :, :image_size, :image_size]
        UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCAmelCase = 1
        UpperCAmelCase = TFViTForImageClassification(_A )
        UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCAmelCase = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def _lowercase ( self ):
        """Return (config, inputs_dict) for the common test mixin."""
        UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
        UpperCAmelCase = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class A_ (a_ , a_ , unittest.TestCase ):
    """Common TF model tests for ViT (feature extraction and image classification).

    NOTE(review): the mixin base classes were mangled to ``a_`` and the class
    attributes to ``UpperCAmelCase__`` (each assignment shadows the previous
    one); every method is named ``_lowercase`` — restore the original names
    before running.
    """
    UpperCAmelCase__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    UpperCAmelCase__ = (
        {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    def _lowercase ( self ):
        """Set up the model tester and the config tester."""
        UpperCAmelCase = TFViTModelTester(self )
        UpperCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 )
    def _lowercase ( self ):
        """Run the shared configuration tests."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def _lowercase ( self ):
        """Skipped: ViT has no inputs_embeds."""
        pass
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def _lowercase ( self ):
        """Skipped: ViT has no inputs_embeds."""
        pass
    def _lowercase ( self ):
        """Check that models expose Keras-layer embeddings."""
        UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase = model_class(_A )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            UpperCAmelCase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )
    def _lowercase ( self ):
        """Check that the forward signature starts with ``pixel_values``."""
        UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase = model_class(_A )
            UpperCAmelCase = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase = [*signature.parameters.keys()]
            UpperCAmelCase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _A )
    def _lowercase ( self ):
        """Run the base-model shape checks."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )
    def _lowercase ( self ):
        """Run the image-classification head checks."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_A )
    @slow
    def _lowercase ( self ):
        """Load the pretrained checkpoint from the Hub."""
        UpperCAmelCase = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
        self.assertIsNotNone(_A )
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
    """Load and return the COCO fixture image used by the integration tests."""
    # Bug fix: the opened image was bound to a throwaway mangled name while
    # an undefined ``image`` was returned, raising NameError at call time.
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class A_ (unittest.TestCase ):
    """Slow integration test: run the pretrained ViT checkpoint on a COCO image.

    NOTE(review): locals are mangled to ``UpperCAmelCase`` and call
    arguments to ``_A``, so reads such as ``image_processor``/``model``/
    ``outputs`` do not resolve as written.
    """
    @cached_property
    def _lowercase ( self ):
        """Return the default ViT image processor when vision deps are available."""
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
    @slow
    def _lowercase ( self ):
        """Verify the logits shape and first values against reference numbers."""
        UpperCAmelCase = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
        UpperCAmelCase = self.default_image_processor
        UpperCAmelCase = prepare_img()
        UpperCAmelCase = image_processor(images=_A , return_tensors='''tf''' )
        # forward pass
        UpperCAmelCase = model(**_A )
        # verify the logits
        UpperCAmelCase = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , _A )
        UpperCAmelCase = tf.constant([-0.27_44, 0.82_15, -0.08_36] )
        tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 )
| 273 | 0 |
"""Lazy import machinery for the Perceiver model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Maps submodule name -> list of public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

# Vision-dependent exports are registered only when the vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

# Torch-dependent exports are registered only when PyTorch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 16 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    """Integration tests checking XLM-RoBERTa hidden states against fairseq reference values."""

    @slow
    def test_xlm_roberta_base(self):
        """Compare a slice of xlm-roberta-base last hidden states with fairseq reference values."""
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # Reference values obtained with:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        """Compare a slice of xlm-roberta-large last hidden states with fairseq reference values."""
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # Reference values obtained with:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 273 | 0 |
# Precomputed fifth powers of every decimal digit, keyed by the digit character.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of *number*."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Project Euler 30: sum of all numbers equal to the sum of fifth powers of their digits.

    Numbers below 1000 are excluded (single/small numbers are not sums of several
    digits' powers), and 6 * 9**5 < 1,000,000 bounds the search from above.
    """
    return sum(
        number
        for number in range(1000, 100_0000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
| 195 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

# Module-level logger; the rest of this script reads it as `logger`.
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."} )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )

    def __post_init__(self):
        """Validate that any provided data files use a supported extension (csv/json)."""
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that dynamically pads multiple-choice inputs.

    Each feature holds ``num_choices`` variants of every tokenizer key; the collator
    flattens them, pads once with ``tokenizer.pad``, then restores the
    (batch, choices, seq) layout and re-attaches the labels.
    """

    tokenizer: "PreTrainedTokenizerBase"  # does the actual padding via .pad()
    padding: "Union[bool, str, PaddingStrategy]" = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # One dict per (example, choice) pair.
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten back to (batch, choices, seq).
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    """Fine-tune and/or evaluate a multiple-choice model on SWAG or user-provided CSV/JSON data."""
    # See all possible arguments in src/transformers/training_args.py or by passing --help.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}""" )
    logger.info(f"""Training/evaluation parameters {training_args}""")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"""ending{i}""" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`." )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        # Repeat the context once per ending, pair with "header + ending" second sentences.
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, )
        # Un-flatten back to groups of 4 choices per example.
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """Entry point for ``xla_spawn`` (TPUs); *index* is the process index, unused here."""
    main()


if __name__ == "__main__":
    main()
| 273 | 0 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Number of spectrogram frames generated per segment; referenced throughout the pipeline.
TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    """Pipeline generating mel spectrograms (and optionally audio via MelGAN) from note sequences
    with a spectrogram diffusion model."""

    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly map *features* from [min_value, max_value] into *output_range*."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Inverse of ``scale_features``: map *outputs* from *input_range* back to [min_value, max_value]."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        """Run both encoders; tokens > 0 are treated as real (non-padding) inputs."""
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        """Predict noise logits for *input_tokens* at diffusion time *noise_time*."""
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps )
        return logits

    @torch.no_grad()
    def __call__(
        self, input_tokens: List[List[int]], generator: Optional[torch.Generator] = None, num_inference_steps: int = 100, return_dict: bool = True, output_type: str = "numpy", callback: Optional[Callable[[int, torch.FloatTensor], None]] = None, callback_steps: int = 1, ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate one spectrogram segment per entry of *input_tokens*, each conditioned on the
        previous segment, and optionally vocode the result with MelGAN."""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps)}.""" )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device), continuous_inputs=encoder_continuous_inputs, continuous_mask=encoder_continuous_mask, )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape, generator=generator, device=self.device, dtype=self.decoder.dtype, )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks, input_tokens=x, noise_time=t / self.scheduler.config.num_train_timesteps, )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
| 324 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    """Builds small MBart configs and dummy inputs for the TF model tests."""

    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a small MBart config and matching dummy inputs (every row ends with EOS)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Exercise the decoder with caching enabled on a single-example batch."""
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard kwargs dict for TF MBart models, deriving any masks not supplied."""
    if attention_mask is None:
        # Attend to every non-padding token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # Always attend to the first decoder token (decoder_start_token may equal pad).
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model tests wired up for MBart."""

    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Skip every pipeline test except feature extraction (others hit layer-call exceptions)."""
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    """Slow en->ro translation check against a fixed reference output."""

    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        """Tokenize src_text, generate with beam search, and decode back to strings."""
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 273 | 0 |
def counting_sort(collection):
    """Stable counting sort: return a new sorted list (negatives supported).

    Runs in O(n + k) time and O(n + k) space, where k is the value range
    (max - min + 1). The input list is not modified.

    Renamed from the obfuscated `__magic_name__` to match the call sites in
    `counting_sort_string` and the ``__main__`` block; the original body also
    referenced the undefined name `UpperCamelCase__` instead of its parameter.
    """
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered
def counting_sort_string(string):
    """Return `string` with its characters sorted via counting sort on code points.

    Renamed from the obfuscated `__magic_name__` to match the ``__main__``
    self-test; the original body referenced the undefined `UpperCamelCase__`
    instead of the loop variables.
    """
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
    # Quick self-test on the string sorter, then sort user-supplied integers.
    # (The obfuscated source bound both inputs to the same `lowerCamelCase_`
    # name while reading `user_input` / `unsorted`.)
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
| 244 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    """Builds a tiny CTRL config plus random inputs and provides the
    create_and_check_* helpers used by the test class below.

    Class and method names restored from their call sites (e.g.
    ``CTRLModelTester(self)`` in ``setUp``); the obfuscated source used
    duplicate `_A` parameters (a SyntaxError) and one shadowed local name.
    """

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # CTRL uses the last vocab id as the padding token.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a tiny config, in the order the
        create_and_check_* helpers expect."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        """Tiny CTRLConfig built from the tester's hyper-parameters."""
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Forward the base model three ways and check output/past shapes."""
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Check LM-head loss and logits shapes (labels == input_ids)."""
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the shared ModelTesterMixin: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        """Check the sequence-classification head produces (batch, num_labels) logits."""
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common/generation/pipeline test suite for the CTRL models.

    NOTE(review): the obfuscated source inherited from the undefined name
    `a_` three times; the mixin bases are inferred from this file's imports
    (ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin) — confirm
    order against upstream. Flag-attribute names follow the mixin convention.
    """

    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    """Slow generation regression test against the public `ctrl` checkpoint."""

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        # Greedy decoding (do_sample=False) so the continuation is deterministic
        # and comparable against the fixed expected ids above.
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 273 | 0 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily merge consecutive (src, tgt) pairs into longer examples.

    Pairs are concatenated with a single space until adding the next pair would
    push either side past `max_tokens` tokens (as measured by `tok`); the
    accumulated example is then finalized and a new one is started.

    Returns (packed_src, packed_tgt) lists of equal length.

    The obfuscated source referenced the undefined `UpperCamelCase__` instead
    of its parameters; names restored from the surrounding logic.
    """
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup: flush the last accumulated example
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    """Pack the train split of `data_dir` with `pack_examples` and write the
    result to `save_path`; val/test splits are copied unchanged.

    Expects `{split}.source` / `{split}.target` line-aligned files.
    """
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    """CLI entry point: parse args, load the tokenizer, and pack the data dir.

    Renamed from the obfuscated `__lowercase` to match the ``__main__`` call;
    the original passed the undefined `UpperCamelCase__` as each `type=`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
| 240 |
import cva
import numpy as np
class HarrisCorner:
    """Harris corner detector over a grayscale image.

    Renamed from the obfuscated `A_` to match the ``__main__`` call site
    (`HarrisCorner(0.04, 3)` / `edge_detect.detect(...)`).
    """

    def __init__(self, k: float, window_size: int):
        """
        k: Harris free parameter; only the commonly used values 0.04 and 0.06
           are accepted.
        window_size: side length of the square summation window.
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        """Return (annotated RGB image, list of [x, y, response]) for `img_path`.

        Detected corners are painted red in the returned image.
        """
        img = cva.imread(img_path, 0)  # read as grayscale
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # Fix: use the configured self.k — the original shadowed it with a
                # hard-coded local 0.04, making the constructor's k argument dead.
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    # Demo: run the detector on an image path and save the annotated result.
    # (The obfuscated source bound everything to `__A`, discarding the results
    # it then used; names restored from the uses below.)
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
| 273 | 0 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class A:
    """Shared tester mixin for diffusers UNet blocks.

    Subclasses must define `block_class` and `block_type` ("down"/"mid"/"up").
    Method names restored from the body's own references (`self.dummy_input`,
    `self.get_dummy_input()`, `self.output_shape`,
    `self.prepare_init_args_and_inputs_for_common()`); the obfuscated source
    used duplicate `_UpperCAmelCase` parameters (a SyntaxError) and collapsed
    the dict-key writes into plain assignments.
    """

    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        """Deterministic random inputs (fixed seeds) for a block forward pass."""
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_a = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_a, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            # NOTE(review): the obfuscated source assigned a bare 32 here; the
            # up-block constructor key is inferred as prev_output_channel — confirm.
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        """Forward the block in eval mode and compare shape plus a 3x3 output slice."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        """Backprop an MSE loss through the block to check it is trainable."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
| 298 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve an Instagram video/IGTV page URL to its raw video bytes.

    Uses the downloadgram.net resolver API to obtain the direct media URL,
    then downloads it. Network I/O only; raises requests' exceptions on
    failure. Renamed from the obfuscated `__SCREAMING_SNAKE_CASE` to match
    the ``__main__`` call; the original body referenced unbound names.
    """
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 273 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.