| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
from ..utils import DummyObject, requires_backends
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""note_seq"""]
def __init__( self : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
requires_backends(self , ['''note_seq'''] )
@classmethod
def __A ( cls : List[str] , *UpperCamelCase__ : int , **UpperCamelCase__ : Dict ):
'''simple docstring'''
requires_backends(cls , ['''note_seq'''] )
@classmethod
def __A ( cls : List[Any] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['''note_seq'''] )
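The class above follows the optional-backend placeholder pattern: a `DummyObject` metaclass plus `requires_backends` calls, so constructing or even touching the class without `note_seq` installed raises a clear error instead of a confusing `ImportError` deep inside the library. A minimal self-contained sketch of that pattern (the helper bodies and the `MidiProcessor` name are illustrative reconstructions, not the library's exact implementation):

```python
import importlib.util

def requires_backends(obj, backends):
    # Raise a clear ImportError if any required optional dependency is missing.
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the missing backend(s): {', '.join(missing)}")

class DummyObject(type):
    # Metaclass: any attribute access on the placeholder class re-checks the backends.
    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)

class MidiProcessor(metaclass=DummyObject):  # hypothetical note_seq-backed class
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)
```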
| 34 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_maskformer'] = ['MaskFormerFeatureExtractor']
_import_structure['image_processing_maskformer'] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_maskformer'] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
_import_structure['modeling_maskformer_swin'] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
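The module above defers its heavy imports through `_LazyModule`: submodules are imported only when one of their attributes is first requested, while the `TYPE_CHECKING` branch keeps static analyzers happy. A simplified stand-in for such a lazy module (a sketch, not transformers' actual `_LazyModule`):

```python
import importlib
import types

class LazyModule(types.ModuleType):
    # Defer submodule imports until an attribute is first accessed.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value
```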
| 34 | 1 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = WavaVecaPhonemeCTCTokenizer
UpperCamelCase_ = False
def __A ( self : List[Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[int] = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
SCREAMING_SNAKE_CASE : Tuple = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE : Dict = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
def __A ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Dict=20 , UpperCamelCase__ : Optional[int]=5 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ )) for i in range(len(UpperCamelCase__ ) )]
SCREAMING_SNAKE_CASE : str = list(filter(lambda UpperCamelCase__ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=UpperCamelCase__ ) , UpperCamelCase__ ) )
if max_length is not None and len(UpperCamelCase__ ) > max_length:
SCREAMING_SNAKE_CASE : int = toks[:max_length]
if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0:
while len(UpperCamelCase__ ) < min_length:
SCREAMING_SNAKE_CASE : int = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE : Dict = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
if " " not in output_txt and len(UpperCamelCase__ ) > 1:
SCREAMING_SNAKE_CASE : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE : Tuple = ''' ''' + output_txt
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
return output_txt, output_ids
def __A ( self : Tuple , **UpperCamelCase__ : Dict ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer('''m xxx ɪ''' , do_phonemize=UpperCamelCase__ ).input_ids
self.assertEqual(UpperCamelCase__ , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
SCREAMING_SNAKE_CASE : List[str] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=UpperCamelCase__ ).input_ids
self.assertEqual(UpperCamelCase__ , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
SCREAMING_SNAKE_CASE : Dict = tokenizer('''maɪ c''' , do_phonemize=UpperCamelCase__ ).input_ids
self.assertEqual(UpperCamelCase__ , [3, 200] ) # mai should be <unk> (=3)
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : int = '''Hello how are you'''
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(UpperCamelCase__ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : List[Any] = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(UpperCamelCase__ ).input_ids , tokenizer(UpperCamelCase__ , do_phonemize=UpperCamelCase__ ).input_ids )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : Dict = '''Hello how are you'''
SCREAMING_SNAKE_CASE : int = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(tokenizer(UpperCamelCase__ ).input_ids )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : List[str] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
SCREAMING_SNAKE_CASE : Any = tokenizer.decode(sample_ids[0] )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch_tokens[0] )
self.assertEqual(UpperCamelCase__ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(UpperCamelCase__ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE : Dict = '''Hello how are you'''
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(UpperCamelCase__ ).input_ids , tokenizer(UpperCamelCase__ , do_phonemize=UpperCamelCase__ ).input_ids )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
SCREAMING_SNAKE_CASE : str = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
SCREAMING_SNAKE_CASE : Any = tokenizer.decode(sample_ids[0] )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch_tokens[0] )
self.assertEqual(UpperCamelCase__ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
SCREAMING_SNAKE_CASE : int = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(UpperCamelCase__ , filter_word_delimiter_token=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch_tokens[0] )
self.assertEqual(UpperCamelCase__ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE : int = '''Hello how are you'''
SCREAMING_SNAKE_CASE : str = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
SCREAMING_SNAKE_CASE : int = tokenizer.decode(tokenizer(UpperCamelCase__ ).input_ids , filter_word_delimiter_token=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE : Tuple = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.decode(tokenizer(UpperCamelCase__ ).input_ids , filter_word_delimiter_token=UpperCamelCase__ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , UpperCamelCase__ )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(UpperCamelCase__ , phonemizer_lang='''en-us''' ).input_ids
SCREAMING_SNAKE_CASE : Tuple = tokenizer(UpperCamelCase__ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(UpperCamelCase__ , '''ɛ l o h aʊ a ʁ j u''' )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : Dict = '''Hello how Are you'''
SCREAMING_SNAKE_CASE : str = '''hello how are you'''
SCREAMING_SNAKE_CASE : Tuple = tokenizer(UpperCamelCase__ ).input_ids
SCREAMING_SNAKE_CASE : Dict = tokenizer(UpperCamelCase__ ).input_ids
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
SCREAMING_SNAKE_CASE : Dict = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
SCREAMING_SNAKE_CASE : str = tokenizer.batch_decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
def __A ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [d[key] for d in offsets]
return retrieved_list
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
SCREAMING_SNAKE_CASE : Any = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(UpperCamelCase__ , output_char_offsets=UpperCamelCase__ , filter_word_delimiter_token=UpperCamelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertTrue(isinstance(outputs_list[0] , UpperCamelCase__ ) )
# transform list to ModelOutput
SCREAMING_SNAKE_CASE : Any = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(UpperCamelCase__ : Any , UpperCamelCase__ : Dict ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
[recursive_check(UpperCamelCase__ , UpperCamelCase__ ) for la, la in zip(UpperCamelCase__ , UpperCamelCase__ )]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
SCREAMING_SNAKE_CASE : str = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.batch_decode(UpperCamelCase__ , output_char_offsets=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = [tokenizer.decode(UpperCamelCase__ , output_char_offsets=UpperCamelCase__ ) for ids in sample_ids]
check_list_tuples_equal(UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def __A ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def __A ( self : Any ):
'''simple docstring'''
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def __A ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def __A ( self : Any ):
'''simple docstring'''
pass
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE : List[str] = len(UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
SCREAMING_SNAKE_CASE : Dict = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
SCREAMING_SNAKE_CASE : List[str] = tokenizer.add_tokens(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE : Tuple = len(UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , 0 )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , len(UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , all_size + len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=UpperCamelCase__ )
self.assertGreaterEqual(len(UpperCamelCase__ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.add_special_tokens(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , 0 )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , len(UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , all_size_a + len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=UpperCamelCase__ )
self.assertGreaterEqual(len(UpperCamelCase__ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __A ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __A ( self : List[str] ):
'''simple docstring'''
pass
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
SCREAMING_SNAKE_CASE : Tuple = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertIsInstance(output['''text'''] , UpperCamelCase__ )
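The offset tests above pin down the decoding contract: adjacent duplicate ids collapse into one token, `<pad>` frames are dropped, and every surviving character keeps its `(start_offset, end_offset)` frame span. A small reference decoder consistent with those assertions (a sketch, not the tokenizer's real code):

```python
from itertools import groupby

def ctc_decode_with_offsets(ids, id_to_token, pad_id):
    # Collapse adjacent duplicates first, then drop pad frames while tracking spans.
    tokens, offsets, pos = [], [], 0
    for token_id, group in groupby(ids):
        length = len(list(group))
        if token_id != pad_id:
            char = id_to_token[token_id]
            tokens.append(char)
            offsets.append({"char": char, "start_offset": pos, "end_offset": pos + length})
        pos += length
    return " ".join(tokens), offsets

vocab = {0: "<pad>", 1: "k", 2: "s"}
print(ctc_decode_with_offsets([1, 1, 0, 2, 2, 2], vocab, pad_id=0))
# ('k s', [{'char': 'k', 'start_offset': 0, 'end_offset': 2},
#          {'char': 's', 'start_offset': 3, 'end_offset': 6}])
```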
| 34 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = True
while ask_again:
SCREAMING_SNAKE_CASE : Optional[Any] = input(_lowercase )
try:
if default is not None and len(_lowercase ) == 0:
return default
return convert_value(_lowercase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_lowercase )
def A ( _lowercase , _lowercase=[] , _lowercase=None , _lowercase=0 ):
SCREAMING_SNAKE_CASE : Dict = BulletMenu(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : str = menu.run(default_choice=_lowercase )
return convert_value(_lowercase ) if convert_value is not None else result
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = int(_lowercase )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def A ( _lowercase ):
return {"yes": True, "no": False}[value.lower()]
class lowercase__ ( argparse.RawDescriptionHelpFormatter):
def __A ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = super()._format_usage(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
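The first helper above is a prompt-and-convert loop: it re-asks until the answer converts cleanly and falls back to a default on empty input. The same control flow written out with readable names (a sketch; the obfuscated sample collapses all the function names):

```python
def ask_field(prompt, convert_value=None, default=None, error_message=None):
    # Loop until input() yields something convert_value accepts.
    while True:
        result = input(prompt)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)

# e.g. ask_field("Mixed precision? [0] no [1] fp16 [2] bf16 [3] fp8: ",
#                convert_value=lambda v: ["no", "fp16", "bf16", "fp8"][int(v)])
```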
| 34 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
__UpperCamelCase : str = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """deformable_detr"""
UpperCamelCase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Dict , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : List[str]=300 , UpperCamelCase__ : Any=1024 , UpperCamelCase__ : Optional[Any]=6 , UpperCamelCase__ : Any=1024 , UpperCamelCase__ : int=8 , UpperCamelCase__ : Dict=6 , UpperCamelCase__ : int=1024 , UpperCamelCase__ : Optional[int]=8 , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : Any=True , UpperCamelCase__ : str="relu" , UpperCamelCase__ : Optional[Any]=256 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Dict=1.0 , UpperCamelCase__ : int=True , UpperCamelCase__ : int=False , UpperCamelCase__ : str="sine" , UpperCamelCase__ : Any="resnet50" , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Any=4 , UpperCamelCase__ : str=4 , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : List[str]=300 , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : str=1 , UpperCamelCase__ : Optional[int]=5 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : str=1 , UpperCamelCase__ : Dict=5 , UpperCamelCase__ : int=2 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : int=0.25 , UpperCamelCase__ : Optional[int]=False , **UpperCamelCase__ : Dict , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
SCREAMING_SNAKE_CASE : Optional[int] = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Dict = backbone_config.get('''model_type''' )
SCREAMING_SNAKE_CASE : str = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE : Any = config_class.from_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = use_timm_backbone
SCREAMING_SNAKE_CASE : List[str] = backbone_config
SCREAMING_SNAKE_CASE : List[Any] = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = num_queries
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : str = d_model
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_ffn_dim
SCREAMING_SNAKE_CASE : int = encoder_layers
SCREAMING_SNAKE_CASE : int = encoder_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = dropout
SCREAMING_SNAKE_CASE : str = attention_dropout
SCREAMING_SNAKE_CASE : int = activation_dropout
SCREAMING_SNAKE_CASE : int = activation_function
SCREAMING_SNAKE_CASE : List[str] = init_std
SCREAMING_SNAKE_CASE : List[Any] = init_xavier_std
SCREAMING_SNAKE_CASE : Optional[int] = encoder_layerdrop
SCREAMING_SNAKE_CASE : List[str] = auxiliary_loss
SCREAMING_SNAKE_CASE : str = position_embedding_type
SCREAMING_SNAKE_CASE : Dict = backbone
SCREAMING_SNAKE_CASE : Union[str, Any] = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Optional[Any] = dilation
# deformable attributes
SCREAMING_SNAKE_CASE : Any = num_feature_levels
SCREAMING_SNAKE_CASE : Tuple = encoder_n_points
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_n_points
SCREAMING_SNAKE_CASE : List[str] = two_stage
SCREAMING_SNAKE_CASE : Optional[Any] = two_stage_num_proposals
SCREAMING_SNAKE_CASE : List[Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
SCREAMING_SNAKE_CASE : Optional[Any] = class_cost
SCREAMING_SNAKE_CASE : Optional[int] = bbox_cost
SCREAMING_SNAKE_CASE : Optional[Any] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE : List[str] = mask_loss_coefficient
SCREAMING_SNAKE_CASE : str = dice_loss_coefficient
SCREAMING_SNAKE_CASE : Optional[int] = bbox_loss_coefficient
SCREAMING_SNAKE_CASE : List[str] = giou_loss_coefficient
SCREAMING_SNAKE_CASE : Optional[Any] = eos_coefficient
SCREAMING_SNAKE_CASE : List[str] = focal_alpha
SCREAMING_SNAKE_CASE : Union[str, Any] = disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def __A ( self : str ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self : Any ):
'''simple docstring'''
return self.d_model
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
SCREAMING_SNAKE_CASE : int = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE : Tuple = self.__class__.model_type
return output
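Because of the `attribute_map` declared at the top of the class, the generic config names resolve to Deformable DETR's own fields. Assuming a transformers version that ships Deformable DETR, usage looks like:

```python
from transformers import DeformableDetrConfig

config = DeformableDetrConfig(d_model=256, encoder_layers=6)
print(config.hidden_size)          # 256 -- aliased to d_model via attribute_map
print(config.num_attention_heads)  # aliased to encoder_attention_heads
```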
| 34 |
from __future__ import annotations
from typing import Any
class lowercase__ ( UpperCamelCase_):
pass
class lowercase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = data
SCREAMING_SNAKE_CASE : Node | None = None
def __iter__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self
SCREAMING_SNAKE_CASE : Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase__ )
yield node.data
SCREAMING_SNAKE_CASE : Dict = node.next_node
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
__UpperCamelCase : List[Any] = Node(1)
__UpperCamelCase : str = Node(2)
__UpperCamelCase : Dict = Node(3)
__UpperCamelCase : List[Any] = Node(4)
print(root_node.has_loop) # False
__UpperCamelCase : int = root_node.next_node
print(root_node.has_loop) # True
__UpperCamelCase : Union[str, Any] = Node(5)
__UpperCamelCase : Union[str, Any] = Node(6)
__UpperCamelCase : List[Any] = Node(5)
__UpperCamelCase : List[str] = Node(6)
print(root_node.has_loop) # False
__UpperCamelCase : List[Any] = Node(1)
print(root_node.has_loop) # False
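The `has_loop` property above detects a cycle by replaying iteration and catching the error, which keeps an O(n) `visited` list. A common constant-memory alternative, reusing the same `Node.next_node` links from the sample, is Floyd's tortoise-and-hare:

```python
def has_loop_constant_memory(head):
    # The fast pointer advances two links per step; it meets the slow pointer
    # if and only if the list contains a cycle.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False
```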
| 34 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCamelCase : str = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt-neox-20b': 2048,
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : List[Any]="<|endoftext|>" , UpperCamelCase__ : Any="<|endoftext|>" , UpperCamelCase__ : Optional[Any]="<|endoftext|>" , UpperCamelCase__ : Union[str, Any]=False , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE : List[str] = getattr(UpperCamelCase__ , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Dict = add_prefix_space
SCREAMING_SNAKE_CASE : List[str] = pre_tok_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space
def __A ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : "Conversation" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE : Dict = input_ids[-self.model_max_length :]
return input_ids
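The conversation method above encodes each turn, appends EOS after it, and left-truncates the history to the model window. The same logic with explicit names (`tokenizer` here is any fast tokenizer with `eos_token_id` and `model_max_length` set):

```python
def build_conversation_ids(tokenizer, turns):
    # Encode every turn followed by EOS, then keep only the most recent window.
    input_ids = []
    for text in turns:
        input_ids.extend(tokenizer.encode(text, add_special_tokens=False) + [tokenizer.eos_token_id])
    if len(input_ids) > tokenizer.model_max_length:
        input_ids = input_ids[-tokenizer.model_max_length:]
    return input_ids
```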
| 34 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=64 , UpperCamelCase__ : Optional[Any]=4_8000 , UpperCamelCase__ : Tuple=480 , UpperCamelCase__ : Union[str, Any]=10 , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : int=False , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 1_4000 , UpperCamelCase__ : int = None , UpperCamelCase__ : str = "fusion" , UpperCamelCase__ : str = "repeatpad" , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = top_db
SCREAMING_SNAKE_CASE : Union[str, Any] = truncation
SCREAMING_SNAKE_CASE : str = padding
SCREAMING_SNAKE_CASE : List[Any] = fft_window_size
SCREAMING_SNAKE_CASE : Tuple = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE : List[str] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = max_length_s
SCREAMING_SNAKE_CASE : Tuple = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE : List[Any] = sampling_rate
SCREAMING_SNAKE_CASE : List[str] = frequency_min
SCREAMING_SNAKE_CASE : Any = frequency_max
SCREAMING_SNAKE_CASE : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm=UpperCamelCase__ , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm='''slaney''' , mel_scale='''slaney''' , )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __A ( self : Optional[int] , UpperCamelCase__ : np.array , UpperCamelCase__ : Optional[np.array] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = spectrogram(
UpperCamelCase__ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase__ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : Any = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE : List[Any] = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE : int = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE : Optional[int] = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE : Optional[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE : Tuple = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE : str = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase__ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __A ( self : Dict , UpperCamelCase__ : np.array , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ ) - max_length
SCREAMING_SNAKE_CASE : Dict = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE : List[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = False
else:
SCREAMING_SNAKE_CASE : str = self._random_mel_fusion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
SCREAMING_SNAKE_CASE : List[str] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE : Tuple = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Any = np.stack(np.tile(UpperCamelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE : List[Any] = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.stack(np.tile(UpperCamelCase__ , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.pad(UpperCamelCase__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE : List[Any] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE : List[str] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : str = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE : List[str] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : List[str] = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Any = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ):
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(UpperCamelCase__ , dtype=np.floataa )
elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(UpperCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE : int = [
self._get_input_mel(UpperCamelCase__ , max_length if max_length else self.nb_max_samples , UpperCamelCase__ , UpperCamelCase__ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase__ )
is_longer.append(UpperCamelCase__ )
if truncation == "fusion" and sum(UpperCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = True
if isinstance(input_mel[0] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE : Optional[Any] = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE : int = BatchFeature(UpperCamelCase__ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : int = input_features.convert_to_tensors(UpperCamelCase__ )
return input_features
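Among the padding strategies above, `"repeatpad"` tiles a short clip a whole number of times and then zero-pads the remainder. Isolated as a numpy-only sketch (only meaningful when the clip is shorter than `max_length`, which is the branch that applies it above):

```python
import numpy as np

def repeatpad(waveform, max_length):
    # Tile the clip floor(max_length / len) times, then zero-pad up to max_length.
    n_repeat = int(max_length / len(waveform))
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)

print(repeatpad(np.ones(3), 8))  # [1. 1. 1. 1. 1. 1. 0. 0.]
```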
| 34 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : int = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """gptj"""
UpperCamelCase_ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Union[str, Any] , UpperCamelCase__ : Optional[Any]=5_0400 , UpperCamelCase__ : Tuple=2048 , UpperCamelCase__ : Optional[int]=4096 , UpperCamelCase__ : List[str]=28 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : str=64 , UpperCamelCase__ : str=None , UpperCamelCase__ : Tuple="gelu_new" , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : Optional[Any]=1E-5 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Dict=5_0256 , UpperCamelCase__ : Any=5_0256 , UpperCamelCase__ : int=False , **UpperCamelCase__ : str , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[str] = n_positions
SCREAMING_SNAKE_CASE : int = n_embd
SCREAMING_SNAKE_CASE : Tuple = n_layer
SCREAMING_SNAKE_CASE : Any = n_head
SCREAMING_SNAKE_CASE : Union[str, Any] = n_inner
SCREAMING_SNAKE_CASE : List[Any] = rotary_dim
SCREAMING_SNAKE_CASE : Optional[int] = activation_function
SCREAMING_SNAKE_CASE : Dict = resid_pdrop
SCREAMING_SNAKE_CASE : Dict = embd_pdrop
SCREAMING_SNAKE_CASE : Dict = attn_pdrop
SCREAMING_SNAKE_CASE : Any = layer_norm_epsilon
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : str = use_cache
SCREAMING_SNAKE_CASE : Optional[int] = bos_token_id
SCREAMING_SNAKE_CASE : str = eos_token_id
super().__init__(
bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , tie_word_embeddings=UpperCamelCase__ , **UpperCamelCase__ )
class lowercase__ ( UpperCamelCase_):
def __init__( self : Optional[Any] , UpperCamelCase__ : PretrainedConfig , UpperCamelCase__ : str = "default" , UpperCamelCase__ : List[PatchingSpec] = None , UpperCamelCase__ : bool = False , ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , task=UpperCamelCase__ , patching_specs=UpperCamelCase__ , use_past=UpperCamelCase__ )
if not getattr(self._config , '''pad_token_id''' , UpperCamelCase__ ):
# TODO: how to do that better?
SCREAMING_SNAKE_CASE : Any = 0
@property
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='''inputs''' )
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return self._config.n_layer
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return self._config.n_head
def __A ( self : Optional[int] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = super(UpperCamelCase__ , self ).generate_dummy_inputs(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
# We need to order the input in the way they appears in the forward()
SCREAMING_SNAKE_CASE : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : int = seqlen + 2
SCREAMING_SNAKE_CASE : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE : str = common_inputs['''attention_mask''']
if self.use_past:
SCREAMING_SNAKE_CASE : List[Any] = ordered_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE : int = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
return ordered_inputs
@property
def __A ( self : List[Any] ):
'''simple docstring'''
return 13
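The dummy `past_key_values` built above are one `(key, value)` pair per layer, each of shape `(batch, num_heads, past_sequence_length, hidden_size // num_heads)`. A quick shape check with illustrative numbers:

```python
import torch

batch, n_head, past_len, hidden = 2, 16, 10, 256
head_dim = hidden // n_head
past_key_values = [
    (torch.zeros(batch, n_head, past_len, head_dim),
     torch.zeros(batch, n_head, past_len, head_dim))
    for _ in range(4)  # one (key, value) pair per transformer layer
]
print(past_key_values[0][0].shape)  # torch.Size([2, 16, 10, 16])
```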
| 34 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """layoutlmv3"""
def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any]=5_0265 , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Tuple=3072 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=512 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[Any]=1E-5 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : int=0 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[str]=1024 , UpperCamelCase__ : str=128 , UpperCamelCase__ : str=128 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Optional[Any]=64 , UpperCamelCase__ : Dict=256 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Dict=224 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
vocab_size=UpperCamelCase__ , hidden_size=UpperCamelCase__ , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , intermediate_size=UpperCamelCase__ , hidden_act=UpperCamelCase__ , hidden_dropout_prob=UpperCamelCase__ , attention_probs_dropout_prob=UpperCamelCase__ , max_position_embeddings=UpperCamelCase__ , type_vocab_size=UpperCamelCase__ , initializer_range=UpperCamelCase__ , layer_norm_eps=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : List[str] = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : str = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Union[str, Any] = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Union[str, Any] = text_embed
SCREAMING_SNAKE_CASE : List[str] = visual_embed
SCREAMING_SNAKE_CASE : Optional[Any] = input_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : List[Any] = classifier_dropout
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = version.parse("""1.12""")
@property
def __A ( self : str ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __A ( self : int ):
'''simple docstring'''
return 1E-5
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Optional[Any] , UpperCamelCase__ : "ProcessorMixin" , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , UpperCamelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : List[Any] = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : Any = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = dict(
processor(
UpperCamelCase__ , text=UpperCamelCase__ , boxes=UpperCamelCase__ , return_tensors=UpperCamelCase__ , ) )
return inputs
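`compute_effective_axis_dimension` is what turns the dynamic `-1` axes above into small fixed sizes so ONNX tracing cannot constant-fold them away. A minimal re-implementation for illustration (the shipped helper may differ in detail):

```python
def compute_effective_axis_dimension(dimension, fixed_dimension, num_token_to_add=0):
    # -1 marks a dynamic axis: substitute the fixed default, then reserve room
    # for the special tokens the tokenizer will add.
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add

print(compute_effective_axis_dimension(-1, fixed_dimension=2))                       # 2
print(compute_effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2))   # 6
```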
| 34 | 1 |
from __future__ import annotations
from typing import Any
class lowercase__ ( UpperCamelCase_):
pass
class lowercase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = data
SCREAMING_SNAKE_CASE : Node | None = None
def __iter__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self
SCREAMING_SNAKE_CASE : Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase__ )
yield node.data
SCREAMING_SNAKE_CASE : Dict = node.next_node
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
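# A minimal alternative sketch (added for illustration, not part of the
# original snippet): Floyd's tortoise-and-hare detects the same loops in O(1)
# extra memory instead of recording every visited node. It assumes a node
# type like the one above, exposing a `next_node` attribute.
def has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False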
if __name__ == "__main__":
__UpperCamelCase : List[Any] = Node(1)
__UpperCamelCase : str = Node(2)
__UpperCamelCase : Dict = Node(3)
__UpperCamelCase : List[Any] = Node(4)
print(root_node.has_loop) # False
__UpperCamelCase : int = root_node.next_node
print(root_node.has_loop) # True
__UpperCamelCase : Union[str, Any] = Node(5)
__UpperCamelCase : Union[str, Any] = Node(6)
__UpperCamelCase : List[Any] = Node(5)
__UpperCamelCase : List[str] = Node(6)
print(root_node.has_loop) # False
__UpperCamelCase : List[Any] = Node(1)
print(root_node.has_loop) # False
| 34
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = FunnelTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[Any] = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE : int = '''unwanted, running'''
return input_text, output_text
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
SCREAMING_SNAKE_CASE : int = tokenizer('''UNwant\u00E9d,running''' )
SCREAMING_SNAKE_CASE : Optional[Any] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
| 34
| 1
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
SCREAMING_SNAKE_CASE : Any = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=UpperCamelCase__ , cache_dir=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = [t[-1] for t in os.walk(os.path.join(UpperCamelCase__ , os.listdir(UpperCamelCase__ )[0] , '''snapshots''' ) )]
SCREAMING_SNAKE_CASE : List[Any] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
SCREAMING_SNAKE_CASE : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Optional[int] = 4
SCREAMING_SNAKE_CASE : Optional[int] = jax.device_count()
SCREAMING_SNAKE_CASE : Dict = num_samples * [prompt]
SCREAMING_SNAKE_CASE : Any = pipeline.prepare_inputs(UpperCamelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE : List[Any] = replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = jax.random.split(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.151_4745 ) < 1E-3
assert np.abs(np.abs(UpperCamelCase__ , dtype=np.floataa ).sum() - 4_9947.875 ) < 5E-1
SCREAMING_SNAKE_CASE : str = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(UpperCamelCase__ ) == num_samples
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
SCREAMING_SNAKE_CASE : List[str] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Optional[int] = 50
SCREAMING_SNAKE_CASE : str = jax.device_count()
SCREAMING_SNAKE_CASE : Union[str, Any] = num_samples * [prompt]
SCREAMING_SNAKE_CASE : Optional[Any] = pipeline.prepare_inputs(UpperCamelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE : Optional[Any] = replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = jax.random.split(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0565_2401) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase__ , dtype=np.floataa ).sum() - 238_3808.2) ) < 5E-1
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
SCREAMING_SNAKE_CASE : List[str] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = 50
SCREAMING_SNAKE_CASE : Optional[int] = jax.device_count()
SCREAMING_SNAKE_CASE : int = num_samples * [prompt]
SCREAMING_SNAKE_CASE : List[Any] = pipeline.prepare_inputs(UpperCamelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE : str = replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.split(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase__ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = 50
SCREAMING_SNAKE_CASE : Optional[int] = jax.device_count()
SCREAMING_SNAKE_CASE : Union[str, Any] = num_samples * [prompt]
SCREAMING_SNAKE_CASE : str = pipeline.prepare_inputs(UpperCamelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE : List[str] = replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = jax.random.split(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase__ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=UpperCamelCase__ , steps_offset=1 , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Any = scheduler.create_state()
SCREAMING_SNAKE_CASE : Tuple = scheduler_state
SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : str = 50
SCREAMING_SNAKE_CASE : Tuple = jax.device_count()
SCREAMING_SNAKE_CASE : Optional[Any] = num_samples * [prompt]
SCREAMING_SNAKE_CASE : str = pipeline.prepare_inputs(UpperCamelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE : Any = replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.split(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase__ , dtype=np.floataa ).sum() - 234_7693.5) ) < 5E-1
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
SCREAMING_SNAKE_CASE : int = jax.device_count()
SCREAMING_SNAKE_CASE : Any = num_samples * [prompt]
SCREAMING_SNAKE_CASE : List[str] = jax.random.split(jax.random.PRNGKey(0 ) , UpperCamelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = pipeline.prepare_inputs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase__ , use_memory_efficient_attention=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = pipeline.prepare_inputs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE : str = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 34
|
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset
SCREAMING_SNAKE_CASE : Optional[Any] = process
SCREAMING_SNAKE_CASE : Union[str, Any] = params
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : List[str] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.dataset[i]
SCREAMING_SNAKE_CASE : Optional[int] = self.process(UpperCamelCase__ , **self.params )
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = loader
SCREAMING_SNAKE_CASE : List[Any] = infer
SCREAMING_SNAKE_CASE : int = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = loader_batch_size
# Internal bookkeeping
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = None
def __len__( self : int ):
'''simple docstring'''
return len(self.loader )
def __iter__( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
SCREAMING_SNAKE_CASE : Optional[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Convert ModelOutput to tuple first
SCREAMING_SNAKE_CASE : Dict = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
SCREAMING_SNAKE_CASE : int = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
SCREAMING_SNAKE_CASE : Tuple = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
SCREAMING_SNAKE_CASE : Any = self._loader_batch_data.__class__(UpperCamelCase__ )
self._loader_batch_index += 1
return result
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
SCREAMING_SNAKE_CASE : Tuple = next(self.iterator )
SCREAMING_SNAKE_CASE : List[Any] = self.infer(UpperCamelCase__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Optional[int] = processed
else:
SCREAMING_SNAKE_CASE : int = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : int = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[Any] = observed_batch_size
# Setting internal index to unwrap the batch
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : int = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
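# Unrolling illustration (a standalone sketch, not tied to the obfuscated
# names above): `loader_batch_item` turns one batched dict of tensors into
# per-item dicts that keep a leading batch dimension of 1, e.g.:
#
#   batch = {"logits": torch.randn(4, 10)}
#   items = [{k: v[i].unsqueeze(0) for k, v in batch.items()} for i in range(4)]
#   assert items[0]["logits"].shape == (1, 10)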
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = iter(self.loader )
SCREAMING_SNAKE_CASE : List[Any] = None
return self
def __A ( self : List[str] ):
'''simple docstring'''
if self.subiterator is None:
SCREAMING_SNAKE_CASE : Dict = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
SCREAMING_SNAKE_CASE : Any = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item.
# ChunkIterator will keep feeding until ALL elements of the iterator
# have created their subiterator and have been iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
SCREAMING_SNAKE_CASE : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
SCREAMING_SNAKE_CASE : Union[str, Any] = next(self.subiterator )
return processed
class lowercase__ ( UpperCamelCase_):
def __iter__( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Tuple = self.loader_batch_item()
SCREAMING_SNAKE_CASE : Any = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
while not is_last:
SCREAMING_SNAKE_CASE : Any = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Tuple = processed
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : List[str] = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[str] = observed_batch_size
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : str = 0
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Any = self.loader_batch_item()
SCREAMING_SNAKE_CASE : List[Any] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
else:
SCREAMING_SNAKE_CASE : int = processed
SCREAMING_SNAKE_CASE : List[str] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
return accumulator
class lowercase__ ( UpperCamelCase_):
def __init__( self : Optional[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = dataset
SCREAMING_SNAKE_CASE : Dict = key
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Dict , UpperCamelCase__ : Tuple ):
'''simple docstring'''
return self.dataset[i][self.key]
class lowercase__ ( UpperCamelCase_):
def __init__( self : List[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataset
SCREAMING_SNAKE_CASE : List[str] = keya
SCREAMING_SNAKE_CASE : Tuple = keya
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 34
| 1
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = '''ylacombe/bark-small'''
SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Dict = '''en_speaker_1'''
SCREAMING_SNAKE_CASE : str = '''This is a test string'''
SCREAMING_SNAKE_CASE : int = '''speaker_embeddings_path.json'''
SCREAMING_SNAKE_CASE : List[Any] = '''speaker_embeddings'''
def __A ( self : str , **UpperCamelCase__ : int ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCamelCase__ )
def __A ( self : Optional[int] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : int = BarkProcessor(tokenizer=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
SCREAMING_SNAKE_CASE : Tuple = 35
SCREAMING_SNAKE_CASE : Optional[int] = 2
SCREAMING_SNAKE_CASE : int = 8
SCREAMING_SNAKE_CASE : List[str] = {
'''semantic_prompt''': np.ones(UpperCamelCase__ ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = processor(text=self.input_string , voice_preset=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
SCREAMING_SNAKE_CASE : List[Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE : str = BarkProcessor(tokenizer=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = processor(text=self.input_string )
SCREAMING_SNAKE_CASE : Any = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 34
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """deberta-v2"""
def __init__( self : Optional[Any] , UpperCamelCase__ : Any=12_8100 , UpperCamelCase__ : Optional[int]=1536 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : List[str]=24 , UpperCamelCase__ : Tuple=6144 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : List[Any]=1E-7 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=-1 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : str="gelu" , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = relative_attention
SCREAMING_SNAKE_CASE : Optional[Any] = max_relative_positions
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCamelCase__ ) == str:
SCREAMING_SNAKE_CASE : Optional[int] = [x.strip() for x in pos_att_type.lower().split('''|''' )]
SCREAMING_SNAKE_CASE : Any = pos_att_type
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''pooler_hidden_size''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pooler_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = pooler_hidden_act
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Dict , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 34
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : Optional[Any] = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Any = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Any = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {int(_lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE : Optional[int] = BitConfig(
conv_layer=_lowercase , num_labels=1_000 , idalabel=_lowercase , labelaid=_lowercase , )
return config
def A ( _lowercase ):
if "stem.conv" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
SCREAMING_SNAKE_CASE : str = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''bit.encoder.''' + name
return name
def A ( ):
SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def A ( _lowercase , _lowercase , _lowercase=False ):
SCREAMING_SNAKE_CASE : List[Any] = get_config(_lowercase )
# load original model from timm
SCREAMING_SNAKE_CASE : Optional[Any] = create_model(_lowercase , pretrained=_lowercase )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE : Optional[int] = timm_model.state_dict()
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Dict = state_dict.pop(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = val.squeeze() if '''head''' in key else val
# load HuggingFace model
SCREAMING_SNAKE_CASE : str = BitForImageClassification(_lowercase )
model.eval()
model.load_state_dict(_lowercase )
# create image processor
SCREAMING_SNAKE_CASE : Optional[Any] = create_transform(**resolve_data_config({} , model=_lowercase ) )
SCREAMING_SNAKE_CASE : List[str] = transform.transforms
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE : Tuple = BitImageProcessor(
do_resize=_lowercase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowercase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = transform(_lowercase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : Optional[int] = processor(_lowercase , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_lowercase , _lowercase )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE : List[Any] = timm_model(_lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
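# Example invocation (a sketch; the script filename and output directory are
# placeholders, not taken from the original source):
#
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 \
#       --push_to_hub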
| 34
| 1
|
def A ( ):
SCREAMING_SNAKE_CASE : Tuple = 0
for i in range(1 , 1_001 ):
total += i**i
    return str(total )[-10:]
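# A minimal alternative sketch (not the original solution): three-argument
# pow keeps only the last ten digits during the sum, avoiding the full
# ~3000-digit intermediate integers. The known result has exactly ten
# digits, so no leading-zero padding is needed here.
def solution_mod() -> str:
    modulus = 10**10
    return str(sum(pow(i, i, modulus) for i in range(1, 1_001)) % modulus)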
if __name__ == "__main__":
print(solution())
| 34
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__UpperCamelCase : str = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase : int = logging.getLogger()
def A ( ):
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('''-f''' )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
return args.f
def A ( _lowercase , _lowercase="eval" ):
SCREAMING_SNAKE_CASE : Dict = os.path.join(_lowercase , f"""{split}_results.json""" )
if os.path.exists(_lowercase ):
with open(_lowercase , '''r''' ) as f:
return json.load(_lowercase )
raise ValueError(f"""can't find {path}""" )
__UpperCamelCase : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase__ ( UpperCamelCase_):
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Tuple = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_glue.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : str = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_clm_flax.main()
SCREAMING_SNAKE_CASE : Dict = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_summarization_flax.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Dict = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_mlm_flax.main()
SCREAMING_SNAKE_CASE : List[Any] = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_ta_mlm_flax.main()
SCREAMING_SNAKE_CASE : Optional[int] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Any = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_ner.main()
SCREAMING_SNAKE_CASE : List[str] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_qa.main()
SCREAMING_SNAKE_CASE : str = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 34
| 1
|
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
__UpperCamelCase : Tuple = re.compile(R'^(?P<major>\d+)' R'\.(?P<minor>\d+)' R'\.(?P<patch>\d+)$')
@total_ordering
@dataclass
class lowercase__ :
UpperCamelCase_ = 42
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = _str_to_version_tuple(self.version_str )
def __repr__( self : List[str] ):
'''simple docstring'''
return f"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""
@property
def __A ( self : Tuple ):
'''simple docstring'''
return self.major, self.minor, self.patch
def __A ( self : Any , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return Version(UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return other
raise TypeError(f"""{other} (type {type(UpperCamelCase__ )}) cannot be compared to version.""" )
def __eq__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE : List[str] = self._validate_operand(UpperCamelCase__ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : int , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self._validate_operand(UpperCamelCase__ )
return self.tuple < other.tuple
def __hash__( self : str ):
'''simple docstring'''
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def __A ( cls : Any , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def __A ( self : Tuple ):
'''simple docstring'''
return self.version_str
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = _VERSION_REG.match(_lowercase )
if not res:
raise ValueError(f"""Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.""" )
return tuple(int(_lowercase ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def A ( _lowercase ):
return ".".join(str(_lowercase ) for v in version_tuple )
| 34
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__UpperCamelCase : Dict = random.Random()
def A ( _lowercase , _lowercase=1.0 , _lowercase=None , _lowercase=None ):
if rng is None:
SCREAMING_SNAKE_CASE : Any = global_rng
SCREAMING_SNAKE_CASE : int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase__ ( unittest.TestCase):
def __init__( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=7 , UpperCamelCase__ : Any=400 , UpperCamelCase__ : List[str]=2000 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : str=30 , UpperCamelCase__ : Tuple=4_4100 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : str = min_seq_length
SCREAMING_SNAKE_CASE : Dict = max_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Optional[Any] = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[int] = feature_size
SCREAMING_SNAKE_CASE : Tuple = num_audio_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = chunk_length
SCREAMING_SNAKE_CASE : str = sampling_rate
def __A ( self : Optional[Any] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __A ( self : Tuple , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
def _flatten(UpperCamelCase__ : str ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = TvltFeatureExtractor
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = TvltFeatureExtractionTester(self )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''spectrogram_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''feature_size''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''num_audio_channels''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''hop_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''chunk_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''sampling_rate''' ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Optional[int] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : int = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Any = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(
UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=UpperCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Dict = np.asarray(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __A ( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Dict = ds.sort('''id''' ).select(range(UpperCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : int = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''pt''' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
SCREAMING_SNAKE_CASE : str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCamelCase__ , atol=1E-4 ) )
| 34
| 1
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
def __init__( self : Tuple , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ImageGPTImageProcessor instead.''' , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 34
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_):
UpperCamelCase_ = 1
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 1000 , UpperCamelCase__ : Optional[Union[np.ndarray, List[float]]] = None ):
'''simple docstring'''
self.set_timesteps(UpperCamelCase__ )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE : str = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
SCREAMING_SNAKE_CASE : Tuple = 4
# running values
SCREAMING_SNAKE_CASE : int = []
def __A ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = num_inference_steps
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
SCREAMING_SNAKE_CASE : Tuple = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
SCREAMING_SNAKE_CASE : int = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sin(steps * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE : Dict = (1.0 - self.betas**2) ** 0.5
SCREAMING_SNAKE_CASE : Optional[Any] = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
SCREAMING_SNAKE_CASE : List[str] = timesteps.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = []
def __A ( self : Tuple , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
SCREAMING_SNAKE_CASE : Optional[int] = (self.timesteps == timestep).nonzero().item()
SCREAMING_SNAKE_CASE : Union[str, Any] = timestep_index + 1
SCREAMING_SNAKE_CASE : int = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase__ )
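# The branches below form a linear multistep (Adams-Bashforth style) combination of the
# stored model outputs; the fourth-order coefficients (55, -59, 37, -9)/24 correspond to
# Algorithm 2 of the PNDM paper referenced above.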
if len(self.ets ) == 1:
SCREAMING_SNAKE_CASE : Dict = self.ets[-1]
elif len(self.ets ) == 2:
SCREAMING_SNAKE_CASE : Optional[int] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
SCREAMING_SNAKE_CASE : str = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
SCREAMING_SNAKE_CASE : Optional[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
SCREAMING_SNAKE_CASE : Optional[int] = self._get_prev_sample(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : torch.FloatTensor , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return sample
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.alphas[timestep_index]
SCREAMING_SNAKE_CASE : List[str] = self.betas[timestep_index]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.alphas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Tuple = self.betas[prev_timestep_index]
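# Transfer formula: invert the current step to an x0-style prediction, then re-noise it at
# the next (less noisy) index: pred = (sample - sigma_t * ets) / alpha_t and
# prev_sample = alpha_prev * pred + sigma_prev * ets.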
SCREAMING_SNAKE_CASE : Dict = (sample - sigma * ets) / max(UpperCamelCase__ , 1E-8 )
SCREAMING_SNAKE_CASE : Optional[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
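# Minimal usage sketch (assuming this is diffusers' IPNDM scheduler, as the F-PNDM comment
# above indicates; `unet` is a hypothetical denoising callable, not part of this file):
#
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       residual = unet(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample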
| 34
| 1
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = IFInpaintingSuperResolutionPipeline
UpperCamelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
UpperCamelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""})
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __A ( self : List[Any] ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def __A ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __A ( self : Optional[int] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __A ( self : Tuple ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __A ( self : List[Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __A ( self : Dict ):
'''simple docstring'''
self._test_save_load_local()
def __A ( self : List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 34
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImg2ImgPipeline,
IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = IFPipeline
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __A ( self : Tuple ):
'''simple docstring'''
return self._get_dummy_components()
def __A ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : int=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : List[str] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __A ( self : Any ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __A ( self : List[Any] ):
'''simple docstring'''
self._test_save_load_local()
def __A ( self : List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pipe_1 = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.float16 )
pipe_2 = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.float16 , text_encoder=None , tokenizer=None )
# pre compute text embeddings and remove T5 to save memory
pipe_1.text_encoder.to('''cuda''' )
prompt_embeds , negative_prompt_embeds = pipe_1.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_1.tokenizer
del pipe_1.text_encoder
gc.collect()
pipe_1.tokenizer = None
pipe_1.text_encoder = None
pipe_1.enable_model_cpu_offload()
pipe_2.enable_model_cpu_offload()
pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
pipe_1.remove_all_hooks()
pipe_2.remove_all_hooks()
# img2img
pipe_1 = IFImg2ImgPipeline(**pipe_1.components )
pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components )
pipe_1.enable_model_cpu_offload()
pipe_2.enable_model_cpu_offload()
pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_img2img(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
pipe_1.remove_all_hooks()
pipe_2.remove_all_hooks()
# inpainting
pipe_1 = IFInpaintingPipeline(**pipe_1.components )
pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components )
pipe_1.enable_model_cpu_offload()
pipe_2.enable_model_cpu_offload()
pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
def __A ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe_1(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pipe_2(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe_1(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = pipe_2(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe_1(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = pipe_2(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def A ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 34
| 1
|
import copy
import random
from transformers import CLIPTokenizer
class lowercase__ ( UpperCamelCase_):
def __init__( self : Optional[int] , *UpperCamelCase__ : str , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = {}
def __A ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = super().add_tokens(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
''' `placeholder_token` that is not already in the tokenizer.''' )
def __A ( self : List[str] , UpperCamelCase__ : str , *UpperCamelCase__ : Any , UpperCamelCase__ : List[str]=1 , **UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
output.append(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = []
for i in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[int] = placeholder_token + f"""_{i}"""
self.try_adding_tokens(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
output.append(UpperCamelCase__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = output
def __A ( self : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Dict=1.0 ):
'''simple docstring'''
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Dict = []
for i in range(len(UpperCamelCase__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCamelCase__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
SCREAMING_SNAKE_CASE : Tuple = self.token_map[placeholder_token]
SCREAMING_SNAKE_CASE : Any = tokens[: 1 + int(len(UpperCamelCase__ ) * prop_tokens_to_load )]
if vector_shuffle:
SCREAMING_SNAKE_CASE : Optional[Any] = copy.copy(UpperCamelCase__ )
random.shuffle(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = text.replace(UpperCamelCase__ , ''' '''.join(UpperCamelCase__ ) )
return text
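# Illustrative example (hypothetical map): with token_map == {"<cat>": ["<cat>_0", "<cat>_1"]},
# "a photo of <cat>" is rewritten to "a photo of <cat>_0 <cat>_1" before tokenization.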
def __call__( self : Optional[Any] , UpperCamelCase__ : Optional[int] , *UpperCamelCase__ : str , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Tuple=1.0 , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCamelCase__ , vector_shuffle=UpperCamelCase__ , prop_tokens_to_load=UpperCamelCase__ ) , *UpperCamelCase__ , **UpperCamelCase__ , )
def __A ( self : Tuple , UpperCamelCase__ : str , *UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Optional[Any]=1.0 , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCamelCase__ , vector_shuffle=UpperCamelCase__ , prop_tokens_to_load=UpperCamelCase__ ) , *UpperCamelCase__ , **UpperCamelCase__ , )
| 34
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__UpperCamelCase : int = logging.get_logger(__name__)
def A ( _lowercase , _lowercase , _lowercase , _lowercase ):
def constraint_to_multiple_of(_lowercase , _lowercase , _lowercase=0 , _lowercase=None ):
SCREAMING_SNAKE_CASE : int = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE : Dict = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE : Optional[Any] = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE : Optional[Any] = (output_size, output_size) if isinstance(_lowercase , _lowercase ) else output_size
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = get_image_size(_lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE : Dict = output_height / input_height
SCREAMING_SNAKE_CASE : Optional[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE : List[Any] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE : List[Any] = scale_height
SCREAMING_SNAKE_CASE : List[str] = constraint_to_multiple_of(scale_height * input_height , multiple=_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = constraint_to_multiple_of(scale_width * input_width , multiple=_lowercase )
return (new_height, new_width)
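# Worked example: a 480x640 input resized toward 384x384 with keep_aspect_ratio=True and
# multiple=32 picks scale_height (0.8, the smaller deviation from 1) for both sides,
# yielding (384, 512).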
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""pixel_values"""]
def __init__( self : int , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'''height''': 384, '''width''': 384}
SCREAMING_SNAKE_CASE : Any = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : str = keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(
UpperCamelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase__ , multiple=UpperCamelCase__ , )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Dict = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Dict = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Any = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Tuple] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[int] = []
for idx in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
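# Illustrative: post-processing with target_sizes=[(480, 640)] bilinearly upsamples each
# logit map to 480x640 before the per-pixel argmax, returning one (480, 640) label map per image.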
| 34
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Tuple = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34
|
import random
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = [], [], []
for element in data:
if element < pivot:
less.append(_lowercase )
elif element > pivot:
greater.append(_lowercase )
else:
equal.append(_lowercase )
return less, equal, greater
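# Example: _partition([3, 1, 4, 1, 5], 3) -> ([1, 1], [3], [4, 5])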
def A ( _lowercase , _lowercase ):
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(_lowercase ) or index < 0:
return None
SCREAMING_SNAKE_CASE : Dict = items[random.randint(0 , len(_lowercase ) - 1 )]
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = _partition(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = len(_lowercase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_lowercase , _lowercase )
# must be in larger
else:
return quick_select(_lowercase , index - (m + count) )
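# Example: quick_select([2, 4, 5, 7, 899, 54, 32], 3) returns 7, the element at index 3 of
# the sorted list; the random pivot gives O(n) expected time versus O(n log n) for sorting first.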
| 34
| 1
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : str = False
class lowercase__ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = VersatileDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=torch.float16 )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe.dual_guided(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe.text_to_image(
prompt=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.image_variation(UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 34
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
# TODO Update this
__UpperCamelCase : List[str] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """esm"""
def __init__( self : Tuple , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Any=768 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : str=12 , UpperCamelCase__ : Optional[int]=3072 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Union[str, Any]=1026 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = position_embedding_type
SCREAMING_SNAKE_CASE : Any = use_cache
SCREAMING_SNAKE_CASE : Dict = emb_layer_norm_before
SCREAMING_SNAKE_CASE : List[str] = token_dropout
SCREAMING_SNAKE_CASE : List[Any] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
SCREAMING_SNAKE_CASE : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = EsmFoldConfig(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
SCREAMING_SNAKE_CASE : Optional[int] = get_default_vocab_list()
else:
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_list
else:
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.esmfold_config.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = None
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = 0
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Optional[int] ):
'''simple docstring'''
if self.trunk is None:
SCREAMING_SNAKE_CASE : Optional[Any] = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = TrunkConfig(**self.trunk )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = asdict(self )
SCREAMING_SNAKE_CASE : Tuple = self.trunk.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 48
UpperCamelCase_ = 1024
UpperCamelCase_ = 128
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = False
UpperCamelCase_ = 4
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Any ):
'''simple docstring'''
if self.structure_module is None:
SCREAMING_SNAKE_CASE : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
SCREAMING_SNAKE_CASE : Dict = self.sequence_state_dim // self.sequence_head_width
SCREAMING_SNAKE_CASE : Tuple = self.pairwise_state_dim // self.pairwise_head_width
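# e.g. sequence_state_dim=1024 with sequence_head_width=32 gives 32 attention heads,
# and the consistency check below requires 32 * 32 == 1024.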
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = asdict(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.structure_module.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 384
UpperCamelCase_ = 128
UpperCamelCase_ = 16
UpperCamelCase_ = 128
UpperCamelCase_ = 12
UpperCamelCase_ = 4
UpperCamelCase_ = 8
UpperCamelCase_ = 0.1
UpperCamelCase_ = 8
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 7
UpperCamelCase_ = 10
UpperCamelCase_ = 1E-8
UpperCamelCase_ = 1E5
def __A ( self : Dict ):
'''simple docstring'''
return asdict(self )
def A ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 34
| 1
|
from math import factorial
class lowercase__ :
def __init__( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = real
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Dict = [1] * rank
else:
SCREAMING_SNAKE_CASE : Optional[int] = rank
def __repr__( self : int ):
'''simple docstring'''
return (
f"""{self.real}+"""
f"""{'+'.join(str(UpperCamelCase__ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , UpperCamelCase__ )
def __add__( self : List[Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return Dual(self.real + other , self.duals )
SCREAMING_SNAKE_CASE : List[Any] = self.duals.copy()
SCREAMING_SNAKE_CASE : List[str] = other.duals.copy()
if len(s_dual ) > len(o_dual ):
o_dual.extend([0] * (len(s_dual ) - len(o_dual )) )
elif len(s_dual ) < len(o_dual ):
s_dual.extend([0] * (len(o_dual ) - len(s_dual )) )
SCREAMING_SNAKE_CASE : Dict = []
for i in range(len(UpperCamelCase__ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , UpperCamelCase__ )
UpperCamelCase_ = __add__
def __sub__( self : Optional[int] , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
return self + other * -1
def __mul__( self : Any , UpperCamelCase__ : str ):
'''simple docstring'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , UpperCamelCase__ )
UpperCamelCase_ = __mul__
def __truediv__( self : Any , UpperCamelCase__ : Dict ):
'''simple docstring'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Any = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , UpperCamelCase__ )
raise ValueError
def __floordiv__( self : Optional[int] , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , UpperCamelCase__ )
raise ValueError
def __pow__( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
if n < 0 or isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError('''power must be a positive integer''' )
if n == 0:
return 1
if n == 1:
return self
SCREAMING_SNAKE_CASE : str = self
for _ in range(n - 1 ):
x *= self
return x
def A ( _lowercase , _lowercase , _lowercase ):
if not callable(_lowercase ):
raise ValueError('''differentiate() requires a function as input for func''' )
if not isinstance(_lowercase , (float, int) ):
raise ValueError('''differentiate() requires a float as input for position''' )
if not isinstance(_lowercase , _lowercase ):
raise ValueError('''differentiate() requires an int as input for order''' )
SCREAMING_SNAKE_CASE : List[Any] = Dual(_lowercase , 1 )
SCREAMING_SNAKE_CASE : Dict = func(_lowercase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(_lowercase )
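# Example: differentiate(lambda x: x**2, 2, 2) == 2, since the second derivative of x**2 is 2.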
if __name__ == "__main__":
import doctest
doctest.testmod()
def A ( _lowercase ):
return y**2 * y**4
print(differentiate(f, 9, 2))
| 34
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=UpperCamelCase__ , )
assert hasattr(self , '''env''' )
def __A ( self : str , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
SCREAMING_SNAKE_CASE : Any = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCamelCase__ , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version='''py36''' , )
def __A ( self : Optional[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __A ( self : Tuple , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.create_estimator(UpperCamelCase__ )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE : List[str] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCamelCase__ )
| 34
| 1
|
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class lowercase__ ( nn.Module):
def __init__( self : Union[str, Any] ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = nn.Linear(3 , 4 )
SCREAMING_SNAKE_CASE : List[Any] = nn.BatchNorm1d(4 )
SCREAMING_SNAKE_CASE : Dict = nn.Linear(4 , 5 )
def __A ( self : Tuple , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return self.linear2(self.batchnorm(self.linear1(UpperCamelCase__ ) ) )
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase__ , model.state_dict() )
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(UpperCamelCase__ , '''index.json''' )
self.assertTrue(os.path.isfile(UpperCamelCase__ ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(UpperCamelCase__ , f"""{key}.dat""" )
self.assertTrue(os.path.isfile(UpperCamelCase__ ) )
# TODO: add tests on the fact weights are properly loaded
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [torch.float16, torch.float32, torch.bfloat16]
for dtype in dtypes:
SCREAMING_SNAKE_CASE : List[str] = torch.randn(2 , 3 , dtype=UpperCamelCase__ )
with TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : Tuple = offload_weight(UpperCamelCase__ , '''weight''' , UpperCamelCase__ , {} )
SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCamelCase__ , '''weight.dat''' )
self.assertTrue(os.path.isfile(UpperCamelCase__ ) )
self.assertDictEqual(UpperCamelCase__ , {'''weight''': {'''shape''': [2, 3], '''dtype''': str(UpperCamelCase__ ).split('''.''' )[1]}} )
SCREAMING_SNAKE_CASE : Optional[Any] = load_offloaded_weight(UpperCamelCase__ , index['''weight'''] )
self.assertTrue(torch.equal(UpperCamelCase__ , UpperCamelCase__ ) )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ModelForTest()
SCREAMING_SNAKE_CASE : Any = model.state_dict()
SCREAMING_SNAKE_CASE : Tuple = {k: v for k, v in state_dict.items() if '''linear2''' not in k}
SCREAMING_SNAKE_CASE : Tuple = {k: v for k, v in state_dict.items() if '''linear2''' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = OffloadedWeightsLoader(state_dict=UpperCamelCase__ , save_folder=UpperCamelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase__ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase__ , weight_map[key] ) )
SCREAMING_SNAKE_CASE : List[Any] = {k: v for k, v in state_dict.items() if '''weight''' in k}
SCREAMING_SNAKE_CASE : List[Any] = {k: v for k, v in state_dict.items() if '''weight''' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = OffloadedWeightsLoader(state_dict=UpperCamelCase__ , save_folder=UpperCamelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase__ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase__ , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase__ , UpperCamelCase__ )
# Duplicates are removed
SCREAMING_SNAKE_CASE : int = OffloadedWeightsLoader(state_dict=UpperCamelCase__ , save_folder=UpperCamelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase__ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase__ , weight_map[key] ) )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2}
SCREAMING_SNAKE_CASE : int = extract_submodules_state_dict(UpperCamelCase__ , ['''a.1''', '''a.2'''] )
self.assertDictEqual(UpperCamelCase__ , {'''a.1''': 0, '''a.2''': 2} )
SCREAMING_SNAKE_CASE : Dict = {'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2}
SCREAMING_SNAKE_CASE : Optional[int] = extract_submodules_state_dict(UpperCamelCase__ , ['''a.1''', '''a.2'''] )
self.assertDictEqual(UpperCamelCase__ , {'''a.1.a''': 0, '''a.2.a''': 2} )
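# A minimal round-trip sketch of the utilities exercised by the tests above,
# using the same accelerate.utils API imported at the top of this module.
# The toy tensor and helper name are illustrative, not part of the original tests.
def _offload_round_trip_example() -> bool:
    weight = torch.randn(2, 3)
    with TemporaryDirectory() as tmp_dir:
        # offload_weight writes weight.dat into tmp_dir and returns the updated index
        index = offload_weight(weight, "weight", tmp_dir, {})
        loaded = load_offloaded_weight(os.path.join(tmp_dir, "weight.dat"), index["weight"])
        return torch.equal(loaded, weight)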
| 34
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__UpperCamelCase : Dict = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__UpperCamelCase : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if ' ' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__UpperCamelCase : Optional[Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 34
| 1
|
import sys
__UpperCamelCase : Optional[Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Optional[int] = 1
for digit in s:
product *= int(_lowercase )
return product
def A ( _lowercase = N ):
SCREAMING_SNAKE_CASE : Any = -sys.maxsize - 1
SCREAMING_SNAKE_CASE : Optional[Any] = n[:13]
SCREAMING_SNAKE_CASE : Dict = 13
while cur_index < len(_lowercase ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
SCREAMING_SNAKE_CASE : Any = substr[1:] + n[cur_index]
cur_index += 1
else:
SCREAMING_SNAKE_CASE : str = max(_lowercase , str_eval(_lowercase ) )
SCREAMING_SNAKE_CASE : int = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 34
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Dict = None
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Union[str, Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = TaTokenizer
UpperCamelCase_ = []
def __init__( self : str , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]=100 , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : str , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE : List[str] = [f"""<extra_id_{i}>""" for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
SCREAMING_SNAKE_CASE : int = len(set(filter(lambda UpperCamelCase__ : bool('''extra_id_''' in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = vocab_file
SCREAMING_SNAKE_CASE : int = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE : str = extra_ids
@staticmethod
def __A ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
SCREAMING_SNAKE_CASE : List[str] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase__ , )
return max_model_length
def __A ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Any = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def __A ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
SCREAMING_SNAKE_CASE : Tuple = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __A ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(r'''<extra_id_\d+>''' , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self : List[Any] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
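# A standalone illustration of the regex filter used by the sentinel-token
# method above; it keeps only tokens of the form <extra_id_N>. The helper
# name and the example token list are illustrative, not part of this module.
def _filter_sentinel_tokens(tokens):
    return [t for t in tokens if re.search(r"<extra_id_\d+>", t) is not None]

# _filter_sentinel_tokens(["<extra_id_0>", "<pad>", "<extra_id_42>"])
# -> ["<extra_id_0>", "<extra_id_42>"]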
| 34
| 1
|
from __future__ import annotations
__UpperCamelCase : Dict = tuple[int, int, int]
__UpperCamelCase : Optional[Any] = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
__UpperCamelCase : List[str] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
__UpperCamelCase : List[str] = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
__UpperCamelCase : Any = 'FOBHMDKEXQNRAULPGSJVTYICZW'
__UpperCamelCase : Dict = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
__UpperCamelCase : int = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
__UpperCamelCase : Optional[Any] = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
__UpperCamelCase : Any = 'SGLCPQWZHKXAREONTFBVIYJUDM'
__UpperCamelCase : Dict = 'HVSICLTYKQUBXDWAJZOMFGPREN'
__UpperCamelCase : Tuple = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
__UpperCamelCase : int = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
__UpperCamelCase : List[str] = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def A ( _lowercase , _lowercase , _lowercase ):
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(_lowercase ) )) < 3:
SCREAMING_SNAKE_CASE : Dict = f"""Please use 3 unique rotors (not {unique_rotsel})"""
raise Exception(_lowercase )
# Checks if rotor positions are valid
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = rotpos
if not 0 < rotorposa <= len(_lowercase ):
SCREAMING_SNAKE_CASE : str = f"""First rotor position is not within range of 1..26 ({rotorposa}"""
raise ValueError(_lowercase )
if not 0 < rotorposa <= len(_lowercase ):
SCREAMING_SNAKE_CASE : List[str] = f"""Second rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(_lowercase )
if not 0 < rotorposa <= len(_lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""Third rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(_lowercase )
# Validates string and returns dict
SCREAMING_SNAKE_CASE : List[str] = _plugboard(_lowercase )
return rotpos, rotsel, pbdict
def A ( _lowercase ):
# validate the plugboard string:
# a) it must be of type string
# b) it must have even length (so pairs can be made)
if not isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : str = f"""Plugboard setting isn't type string ({type(_lowercase )})"""
raise TypeError(_lowercase )
elif len(_lowercase ) % 2 != 0:
SCREAMING_SNAKE_CASE : List[str] = f"""Odd number of symbols ({len(_lowercase )})"""
raise Exception(_lowercase )
elif pbstring == "":
return {}
pbstring = pbstring.replace(''' ''' , '''''' ) # str.replace returns a new string, so the result must be reassigned
# Checks if all characters are unique
SCREAMING_SNAKE_CASE : Tuple = set()
for i in pbstring:
if i not in abc:
SCREAMING_SNAKE_CASE : Optional[Any] = f"""'{i}' not in list of symbols"""
raise Exception(_lowercase )
elif i in tmppbl:
SCREAMING_SNAKE_CASE : Dict = f"""Duplicate symbol ({i})"""
raise Exception(_lowercase )
else:
tmppbl.add(_lowercase )
del tmppbl
# Create the pair dictionary
SCREAMING_SNAKE_CASE : List[Any] = {}
for j in range(0 , len(_lowercase ) - 1 , 2 ):
SCREAMING_SNAKE_CASE : List[str] = pbstring[j + 1]
SCREAMING_SNAKE_CASE : str = pbstring[j]
return pb
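# A standalone illustration of the pairing loop above: consecutive characters
# are mapped to each other symmetrically, so the plugboard swap is its own
# inverse. Function name and input are illustrative, not part of this module.
def _pairs_to_dict(symbols: str) -> dict:
    pairs = {}
    for j in range(0, len(symbols) - 1, 2):
        pairs[symbols[j]] = symbols[j + 1]
        pairs[symbols[j + 1]] = symbols[j]
    return pairs

# _pairs_to_dict("ABCD") -> {"A": "B", "B": "A", "C": "D", "D": "C"}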
def A ( _lowercase , _lowercase , _lowercase = (rotora, rotora, rotora) , _lowercase = "" , ):
SCREAMING_SNAKE_CASE : List[Any] = text.upper()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = _validator(
_lowercase , _lowercase , plugb.upper() )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = rotor_position
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
SCREAMING_SNAKE_CASE : Any = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
SCREAMING_SNAKE_CASE : Union[str, Any] = plugboard[symbol]
# rotor ra --------------------------
SCREAMING_SNAKE_CASE : List[Any] = abc.index(_lowercase ) + rotorposa
SCREAMING_SNAKE_CASE : Any = rotora[index % len(_lowercase )]
# rotor rb --------------------------
SCREAMING_SNAKE_CASE : List[str] = abc.index(_lowercase ) + rotorposa
SCREAMING_SNAKE_CASE : str = rotora[index % len(_lowercase )]
# rotor rc --------------------------
SCREAMING_SNAKE_CASE : Dict = abc.index(_lowercase ) + rotorposa
SCREAMING_SNAKE_CASE : int = rotora[index % len(_lowercase )]
# reflector --------------------------
# the reflector makes the cipher an involution, so the same settings decrypt
SCREAMING_SNAKE_CASE : int = reflector[symbol]
# 2nd rotors
SCREAMING_SNAKE_CASE : Union[str, Any] = abc[rotora.index(_lowercase ) - rotorposa]
SCREAMING_SNAKE_CASE : List[str] = abc[rotora.index(_lowercase ) - rotorposa]
SCREAMING_SNAKE_CASE : Dict = abc[rotora.index(_lowercase ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
SCREAMING_SNAKE_CASE : Optional[Any] = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_lowercase ):
SCREAMING_SNAKE_CASE : Any = 0
rotorposa += 1
if rotorposa >= len(_lowercase ):
SCREAMING_SNAKE_CASE : Optional[int] = 0
rotorposa += 1
if rotorposa >= len(_lowercase ):
SCREAMING_SNAKE_CASE : str = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_lowercase )
return "".join(_lowercase )
if __name__ == "__main__":
__UpperCamelCase : Dict = 'This is my Python script that emulates the Enigma machine from WWII.'
__UpperCamelCase : Optional[Any] = (1, 1, 1)
__UpperCamelCase : Union[str, Any] = 'pictures'
__UpperCamelCase : Union[str, Any] = (rotora, rotora, rotora)
__UpperCamelCase : Tuple = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 34
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : str = False
class lowercase__ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = VersatileDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe.dual_guided(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe.text_to_image(
prompt=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.image_variation(UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 34
| 1
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : int = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def A ( ):
SCREAMING_SNAKE_CASE : Any = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
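# The helpers above build (hf_key, original_key) pairs; applying such a rename
# list to a checkpoint is a one-line dict comprehension. A hedged sketch with
# illustrative names, not part of the original conversion script:
def _apply_renames(original_weights: dict, renames: list) -> dict:
    return {hf_key: original_weights[original_key] for hf_key, original_key in renames}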
def A ( _lowercase , _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : List[str] = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : str = 1_000
SCREAMING_SNAKE_CASE : Optional[Any] = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : List[Any] = num_labels
SCREAMING_SNAKE_CASE : List[str] = json.load(open(cached_download(hf_hub_url(_lowercase , _lowercase , repo_type='''dataset''' ) ) , '''r''' ) )
SCREAMING_SNAKE_CASE : Tuple = {int(_lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : List[Any] = idalabel
SCREAMING_SNAKE_CASE : Any = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : int = CvtConfig(num_labels=_lowercase , idalabel=_lowercase , labelaid=_lowercase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
SCREAMING_SNAKE_CASE : int = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
SCREAMING_SNAKE_CASE : Dict = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
SCREAMING_SNAKE_CASE : Any = [2, 2, 20]
SCREAMING_SNAKE_CASE : List[str] = [3, 12, 16]
SCREAMING_SNAKE_CASE : int = [192, 768, 1_024]
SCREAMING_SNAKE_CASE : Any = CvtForImageClassification(_lowercase )
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
SCREAMING_SNAKE_CASE : Any = image_size
SCREAMING_SNAKE_CASE : Any = torch.load(_lowercase , map_location=torch.device('''cpu''' ) )
SCREAMING_SNAKE_CASE : str = OrderedDict()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
SCREAMING_SNAKE_CASE : List[str] = list_of_state_dict + cls_token(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = list_of_state_dict + embeddings(_lowercase )
for cnt in range(config.depth[idx] ):
SCREAMING_SNAKE_CASE : List[Any] = list_of_state_dict + attention(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : Any = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_lowercase )
for i in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE : Tuple = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_lowercase )
model.save_pretrained(_lowercase )
image_processor.save_pretrained(_lowercase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Path to the cvt checkpoint file',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__UpperCamelCase : Tuple = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 34
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def A ( _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = analyze_text(_lowercase )
SCREAMING_SNAKE_CASE : Any = list(''' ''' + ascii_lowercase )
# total count of single characters; used to turn counts into probabilities.
SCREAMING_SNAKE_CASE : Tuple = sum(single_char_strings.values() )
# running entropy for one-character strings
SCREAMING_SNAKE_CASE : Tuple = 0
# for each letter of the alphabet, add its entropy contribution if it occurs in the text
for ch in my_alphas:
if ch in single_char_strings:
SCREAMING_SNAKE_CASE : Tuple = single_char_strings[ch]
SCREAMING_SNAKE_CASE : List[str] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
# two-character strings
SCREAMING_SNAKE_CASE : Optional[Any] = sum(two_char_strings.values() )
SCREAMING_SNAKE_CASE : List[str] = 0
# for each two-character sequence, add its entropy contribution if it occurs in the text
for cha in my_alphas:
for chb in my_alphas:
SCREAMING_SNAKE_CASE : Union[str, Any] = cha + chb
if sequence in two_char_strings:
SCREAMING_SNAKE_CASE : Any = two_char_strings[sequence]
SCREAMING_SNAKE_CASE : Dict = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = Counter() # type: ignore
SCREAMING_SNAKE_CASE : Any = Counter() # type: ignore
single_char_strings[text[-1]] += 1 # the loop below stops one character short, so count the last one here
# seed the two-character counter as if the text started with a space.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
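# A compact re-statement of the first-order entropy computed above,
# H = -sum(p * log2(p)) over character frequencies. Standalone sketch for
# illustration; for example first_order_entropy("aab") is about 0.918 bits
# per symbol, since p(a) = 2/3 and p(b) = 1/3.
def first_order_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())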
def A ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 34
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """camembert"""
def __init__( self : str , UpperCamelCase__ : Optional[Any]=3_0522 , UpperCamelCase__ : Optional[int]=768 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Optional[int]=3072 , UpperCamelCase__ : int="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=512 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : Optional[Any]=1E-12 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : int=0 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : List[Any]="absolute" , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : str=None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : str = intermediate_size
SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = max_position_embeddings
SCREAMING_SNAKE_CASE : str = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE : Any = position_embedding_type
SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 34
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Tuple = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34
| 1
|
def A ( _lowercase , _lowercase ):
assert x is not None
assert y is not None
SCREAMING_SNAKE_CASE : Union[str, Any] = len(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = len(_lowercase )
# declaring the array for storing the dp values
SCREAMING_SNAKE_CASE : List[Any] = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
SCREAMING_SNAKE_CASE : List[str] = 1 if x[i - 1] == y[j - 1] else 0
SCREAMING_SNAKE_CASE : List[str] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
SCREAMING_SNAKE_CASE : str = ''''''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = m, n
while i > 0 and j > 0:
SCREAMING_SNAKE_CASE : Optional[int] = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
SCREAMING_SNAKE_CASE : Optional[Any] = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
__UpperCamelCase : int = 'AGGTAB'
__UpperCamelCase : Optional[Any] = 'GXTXAYB'
__UpperCamelCase : Union[str, Any] = 4
__UpperCamelCase : int = 'GTAB'
__UpperCamelCase , __UpperCamelCase : str = longest_common_subsequence(a, b)
print('len =', ln, ', sub-sequence =', subseq)
import doctest
doctest.testmod()
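# A hedged top-down alternative to the bottom-up table above: memoized
# recursion over prefix lengths returns the LCS length only. Standalone
# cross-check, not part of the original module.
from functools import lru_cache

def lcs_length(x: str, y: str) -> int:
    @lru_cache(maxsize=None)
    def go(i: int, j: int) -> int:
        if i == 0 or j == 0:
            return 0
        if x[i - 1] == y[j - 1]:
            return go(i - 1, j - 1) + 1
        return max(go(i - 1, j), go(i, j - 1))

    return go(len(x), len(y))

# lcs_length("AGGTAB", "GXTXAYB") -> 4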
| 34
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : Tuple = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['MaskFormerFeatureExtractor']
__UpperCamelCase : List[Any] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__UpperCamelCase : Union[str, Any] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 34
| 1
|
import comet # From: unbabel-comet
import torch
import datasets
__UpperCamelCase : Dict = datasets.logging.get_logger(__name__)
__UpperCamelCase : List[str] = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
__UpperCamelCase : List[str] = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
__UpperCamelCase : Any = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowercase__ ( datasets.Metric):
def __A ( self : str ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
def __A ( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
if self.config_name == "default":
SCREAMING_SNAKE_CASE : Optional[Any] = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
else:
SCREAMING_SNAKE_CASE : List[str] = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def __A ( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int=None , UpperCamelCase__ : str=False ):
'''simple docstring'''
if gpus is None:
SCREAMING_SNAKE_CASE : List[str] = 1 if torch.cuda.is_available() else 0
SCREAMING_SNAKE_CASE : List[Any] = {'''src''': sources, '''mt''': predictions, '''ref''': references}
SCREAMING_SNAKE_CASE : Tuple = [dict(zip(UpperCamelCase__ , UpperCamelCase__ ) ) for t in zip(*data.values() )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.scorer.predict(UpperCamelCase__ , gpus=UpperCamelCase__ , progress_bar=UpperCamelCase__ )
return {"mean_score": mean_score, "scores": scores}
| 34
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCamelCase : Dict = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = True
while ask_again:
SCREAMING_SNAKE_CASE : Optional[Any] = input(_lowercase )
try:
if default is not None and len(_lowercase ) == 0:
return default
return convert_value(_lowercase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_lowercase )
def A ( _lowercase , _lowercase=[] , _lowercase=None , _lowercase=0 ):
SCREAMING_SNAKE_CASE : Dict = BulletMenu(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : str = menu.run(default_choice=_lowercase )
return convert_value(_lowercase ) if convert_value is not None else result
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = int(_lowercase )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def A ( _lowercase ):
return {"yes": True, "no": False}[value.lower()]
class lowercase__ ( argparse.RawDescriptionHelpFormatter):
def __A ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = super()._format_usage(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
| 34
| 1
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
__UpperCamelCase : Dict = logging.getLogger(__name__)
__UpperCamelCase : str = {'facebook/bart-base': BartForConditionalGeneration}
__UpperCamelCase : Dict = {'facebook/bart-base': BartTokenizer}
def A ( ):
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
parser.add_argument(
'''--validation_file''' , type=_lowercase , default=_lowercase , help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' , type=_lowercase , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
parser.add_argument(
'''--num_beams''' , type=_lowercase , default=_lowercase , help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) , )
parser.add_argument(
'''--model_name_or_path''' , type=_lowercase , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_lowercase , )
parser.add_argument(
'''--config_name''' , type=_lowercase , default=_lowercase , help='''Pretrained config name or path if not the same as model_name''' , )
parser.add_argument(
'''--device''' , type=_lowercase , default='''cpu''' , help='''Device where the model will be run''' , )
parser.add_argument('''--output_file_path''' , type=_lowercase , default=_lowercase , help='''Where to store the final ONNX file.''' )
SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
return args
def A ( _lowercase , _lowercase="cpu" ):
SCREAMING_SNAKE_CASE : Dict = model_dict[model_name].from_pretrained(_lowercase ).to(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_dict[model_name].from_pretrained(_lowercase )
if model_name in ["facebook/bart-base"]:
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : int = 0
return huggingface_model, tokenizer
def A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
model.eval()
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : Dict = torch.jit.script(BARTBeamSearchGenerator(_lowercase ) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = '''My friends are cool but they eat too many carbs.'''
SCREAMING_SNAKE_CASE : Tuple = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='''pt''' ).to(model.device )
SCREAMING_SNAKE_CASE : str = model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=_lowercase , max_length=_lowercase , early_stopping=_lowercase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_lowercase , (
inputs['''input_ids'''],
inputs['''attention_mask'''],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _lowercase , opset_version=14 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''seq'''},
'''output_ids''': {0: '''batch''', 1: '''seq_out'''},
} , example_outputs=_lowercase , )
logger.info('''Model exported to {}'''.format(_lowercase ) )
SCREAMING_SNAKE_CASE : str = remove_dup_initializers(os.path.abspath(_lowercase ) )
logger.info('''Deduplicated and optimized model written to {}'''.format(_lowercase ) )
SCREAMING_SNAKE_CASE : int = onnxruntime.InferenceSession(_lowercase )
SCREAMING_SNAKE_CASE : int = ort_sess.run(
_lowercase , {
'''input_ids''': inputs['''input_ids'''].cpu().numpy(),
'''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
'''num_beams''': np.array(_lowercase ),
'''max_length''': np.array(_lowercase ),
'''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
logger.info('''Success.''' )
def A ( ):
SCREAMING_SNAKE_CASE : Any = parse_args()
SCREAMING_SNAKE_CASE : str = 5
SCREAMING_SNAKE_CASE : Optional[int] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
SCREAMING_SNAKE_CASE : int = torch.device(args.device )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = load_model_tokenizer(args.model_name_or_path , _lowercase )
if model.config.decoder_start_token_id is None:
raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
model.to(_lowercase )
if args.max_length:
SCREAMING_SNAKE_CASE : Optional[Any] = args.max_length
if args.num_beams:
SCREAMING_SNAKE_CASE : Dict = args.num_beams
if args.output_file_path:
SCREAMING_SNAKE_CASE : Optional[int] = args.output_file_path
else:
SCREAMING_SNAKE_CASE : Optional[Any] = '''BART.onnx'''
logger.info('''Exporting model to ONNX''' )
export_and_validate_model(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
if __name__ == "__main__":
main()
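# Typical invocation (argument names match the parser above; the script
# filename and paths are illustrative, not confirmed by this file):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --max_length 5 --num_beams 4 --output_file_path BART.onnx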
| 34
|
from __future__ import annotations
from typing import Any
class lowercase__ ( UpperCamelCase_):
pass
class lowercase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = data
SCREAMING_SNAKE_CASE : Node | None = None
def __iter__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self
SCREAMING_SNAKE_CASE : Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase__ )
yield node.data
SCREAMING_SNAKE_CASE : Dict = node.next_node
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
__UpperCamelCase : List[Any] = Node(1)
__UpperCamelCase : str = Node(2)
__UpperCamelCase : Dict = Node(3)
__UpperCamelCase : List[Any] = Node(4)
print(root_node.has_loop) # False
__UpperCamelCase : int = root_node.next_node
print(root_node.has_loop) # True
__UpperCamelCase : Union[str, Any] = Node(5)
__UpperCamelCase : Union[str, Any] = Node(6)
__UpperCamelCase : List[Any] = Node(5)
__UpperCamelCase : List[str] = Node(6)
print(root_node.has_loop) # False
__UpperCamelCase : List[Any] = Node(1)
print(root_node.has_loop) # False
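# A minimal alternative sketch (not part of the original snippet): Floyd's
# tortoise-and-hare detects the same loops in O(1) extra space instead of
# keeping a `visited` list; it only assumes the `next_node` attribute above.
def detect_loop_floyd(head):
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:                 # the pointers meet only inside a cycle
            return True
    return False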
| 34
| 1
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Dict = None
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Union[str, Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = TaTokenizer
UpperCamelCase_ = []
def __init__( self : str , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]=100 , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : str , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE : List[str] = [f"""<extra_id_{i}>""" for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
SCREAMING_SNAKE_CASE : int = len(set(filter(lambda UpperCamelCase__ : bool('''extra_id_''' in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = vocab_file
SCREAMING_SNAKE_CASE : int = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE : str = extra_ids
@staticmethod
def __A ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
SCREAMING_SNAKE_CASE : List[str] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase__ , )
return max_model_length
def __A ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Any = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def __A ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
SCREAMING_SNAKE_CASE : Tuple = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __A ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(r'''<extra_id_\d+>''' , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self : List[Any] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
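# A standalone illustration (not part of the class above; id 1 is assumed for
# </s>, as in the stock T5 vocab) of how `build_inputs_with_special_tokens`
# composes sequences: EOS is appended to every segment.
def _demo_build_inputs(ids_a, ids_b=None, eos_id=1):
    if ids_b is None:
        return ids_a + [eos_id]
    return ids_a + [eos_id] + ids_b + [eos_id]
assert _demo_build_inputs([10, 11]) == [10, 11, 1]
assert _demo_build_inputs([10], [20]) == [10, 1, 20, 1]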
| 34
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=64 , UpperCamelCase__ : Optional[Any]=4_8000 , UpperCamelCase__ : Tuple=480 , UpperCamelCase__ : Union[str, Any]=10 , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : int=False , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 1_4000 , UpperCamelCase__ : int = None , UpperCamelCase__ : str = "fusion" , UpperCamelCase__ : str = "repeatpad" , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = top_db
SCREAMING_SNAKE_CASE : Union[str, Any] = truncation
SCREAMING_SNAKE_CASE : str = padding
SCREAMING_SNAKE_CASE : List[Any] = fft_window_size
SCREAMING_SNAKE_CASE : Tuple = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE : List[str] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = max_length_s
SCREAMING_SNAKE_CASE : Tuple = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE : List[Any] = sampling_rate
SCREAMING_SNAKE_CASE : List[str] = frequency_min
SCREAMING_SNAKE_CASE : Any = frequency_max
SCREAMING_SNAKE_CASE : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm=UpperCamelCase__ , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm='''slaney''' , mel_scale='''slaney''' , )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __A ( self : Optional[int] , UpperCamelCase__ : np.array , UpperCamelCase__ : Optional[np.array] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = spectrogram(
UpperCamelCase__ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase__ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
            # if the audio is too short, fall back to the first chunk for the middle part
SCREAMING_SNAKE_CASE : int = [0]
if len(ranges[2] ) == 0:
            # if the audio is too short, fall back to the first chunk for the last part
SCREAMING_SNAKE_CASE : Any = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE : List[Any] = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE : int = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE : Optional[int] = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE : Optional[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE : Tuple = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE : str = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase__ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __A ( self : Dict , UpperCamelCase__ : np.array , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ ) - max_length
SCREAMING_SNAKE_CASE : Dict = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE : List[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = False
else:
SCREAMING_SNAKE_CASE : str = self._random_mel_fusion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
SCREAMING_SNAKE_CASE : List[str] = False
        # Only use "repeat" as an additional padding mode; the audio is repeated before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE : Tuple = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Any = np.stack(np.tile(UpperCamelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE : List[Any] = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.stack(np.tile(UpperCamelCase__ , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.pad(UpperCamelCase__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE : List[Any] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE : List[str] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : str = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE : List[str] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : List[str] = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Any = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ):
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(UpperCamelCase__ , dtype=np.floataa )
elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(UpperCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE : int = [
self._get_input_mel(UpperCamelCase__ , max_length if max_length else self.nb_max_samples , UpperCamelCase__ , UpperCamelCase__ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase__ )
is_longer.append(UpperCamelCase__ )
if truncation == "fusion" and sum(UpperCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = True
if isinstance(input_mel[0] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE : Optional[Any] = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE : int = BatchFeature(UpperCamelCase__ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : int = input_features.convert_to_tensors(UpperCamelCase__ )
return input_features
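# A simplified numpy sketch (separate from the class above, and omitting the
# extra np.stack the original performs) of the "repeatpad" branch: a short
# waveform is tiled, then zero-padded out to max_length.
def _demo_repeatpad(waveform, max_length):
    n_repeat = int(max_length / len(waveform))
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)
assert _demo_repeatpad(np.array([1.0, 2.0, 3.0]), 8).tolist() == [1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 0.0, 0.0]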
| 34
| 1
|
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
__UpperCamelCase : List[str] = 'base_with_context'
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=_lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
SCREAMING_SNAKE_CASE : Optional[int] = weights[f"""layers_{lyr_num}"""]
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE : Dict = ly_weight['''attention''']
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
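# Why the `.T` above: Flax/T5X Dense kernels are stored as (in_features,
# out_features), while torch.nn.Linear.weight is (out_features, in_features)
# and torch applies x @ weight.T. A quick numpy sanity check of that mapping:
_demo_kernel = onp.random.randn(4, 3)   # flax kernel: (in=4, out=3)
_demo_weight = _demo_kernel.T           # torch Linear weight: (out=3, in=4)
_demo_x = onp.random.randn(2, 4)
assert onp.allclose(_demo_x @ _demo_kernel, _demo_x @ _demo_weight.T)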
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=_lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
SCREAMING_SNAKE_CASE : Optional[Any] = weights[f"""layers_{lyr_num}"""]
SCREAMING_SNAKE_CASE : Union[str, Any] = ly_weight['''attention''']
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=_lowercase )
SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
SCREAMING_SNAKE_CASE : str = weights[f"""layers_{lyr_num}"""]
SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : str = ly_weight['''self_attention''']
SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : List[str] = ly_weight['''MultiHeadDotProductAttention_0''']
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = checkpoints.load_tax_checkpoint(args.checkpoint_path )
SCREAMING_SNAKE_CASE : Tuple = jnp.tree_util.tree_map(onp.array , _lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = [
'''from __gin__ import dynamic_registration''',
'''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
'''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
'''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
]
SCREAMING_SNAKE_CASE : int = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = inference.parse_training_gin_file(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = inference.InferenceModel(args.checkpoint_path , _lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
SCREAMING_SNAKE_CASE : Optional[int] = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
SCREAMING_SNAKE_CASE : Optional[int] = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
SCREAMING_SNAKE_CASE : Dict = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
SCREAMING_SNAKE_CASE : Any = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , _lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , _lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = load_decoder(ta_checkpoint['''target''']['''decoder'''] , _lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
SCREAMING_SNAKE_CASE : List[Any] = SpectrogramDiffusionPipeline(
notes_encoder=_lowercase , continuous_encoder=_lowercase , decoder=_lowercase , scheduler=_lowercase , melgan=_lowercase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
__UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
__UpperCamelCase : List[str] = parser.parse_args()
main(args)
| 34
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """layoutlmv3"""
def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any]=5_0265 , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Tuple=3072 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=512 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[Any]=1E-5 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : int=0 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[str]=1024 , UpperCamelCase__ : str=128 , UpperCamelCase__ : str=128 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Optional[Any]=64 , UpperCamelCase__ : Dict=256 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Dict=224 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
vocab_size=UpperCamelCase__ , hidden_size=UpperCamelCase__ , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , intermediate_size=UpperCamelCase__ , hidden_act=UpperCamelCase__ , hidden_dropout_prob=UpperCamelCase__ , attention_probs_dropout_prob=UpperCamelCase__ , max_position_embeddings=UpperCamelCase__ , type_vocab_size=UpperCamelCase__ , initializer_range=UpperCamelCase__ , layer_norm_eps=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : List[str] = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : str = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Union[str, Any] = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Union[str, Any] = text_embed
SCREAMING_SNAKE_CASE : List[str] = visual_embed
SCREAMING_SNAKE_CASE : Optional[Any] = input_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : List[Any] = classifier_dropout
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = version.parse("""1.12""")
@property
def __A ( self : str ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __A ( self : int ):
'''simple docstring'''
return 1E-5
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Optional[Any] , UpperCamelCase__ : "ProcessorMixin" , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , UpperCamelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : List[Any] = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : Any = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = dict(
processor(
UpperCamelCase__ , text=UpperCamelCase__ , boxes=UpperCamelCase__ , return_tensors=UpperCamelCase__ , ) )
return inputs
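# A rough illustration (hypothetical values, outside the config class) of the
# dummy tensors built above for batch_size=2, seq_length=8: the text is the
# unk token string-repeated per example, and each example carries a single
# [x0, y0, x1, y1] box in LayoutLM's 0-1000 normalized page coordinates.
_demo_text = [[" ".join(["<unk>"]) * 8]] * 2
_demo_boxes = [[[48, 84, 73, 128]]] * 2
assert len(_demo_text) == len(_demo_boxes) == 2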
| 34
| 1
|
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = GPTaTokenizer
UpperCamelCase_ = GPTaTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = {"""add_prefix_space""": True}
UpperCamelCase_ = False
def __A ( self : List[str] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : List[str] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
SCREAMING_SNAKE_CASE : Tuple = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE : Optional[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE : int = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase__ ) )
def __A ( self : Any , **UpperCamelCase__ : Dict ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Optional[Any] , **UpperCamelCase__ : Any ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : str , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = '''lower newer'''
SCREAMING_SNAKE_CASE : List[str] = '''lower newer'''
return input_text, output_text
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE : Optional[int] = '''lower newer'''
SCREAMING_SNAKE_CASE : str = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize(UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def __A ( self : str ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[Any] = self.get_rust_tokenizer(add_prefix_space=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = '''lower newer'''
# Testing tokenization
SCREAMING_SNAKE_CASE : str = tokenizer.tokenize(UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE : List[Any] = self.get_rust_tokenizer(add_prefix_space=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
# Testing the unknown token
SCREAMING_SNAKE_CASE : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def __A ( self : List[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : int ):
'''simple docstring'''
pass
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any]=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE : Dict = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
# Simple input
SCREAMING_SNAKE_CASE : Union[str, Any] = '''This is a simple input'''
SCREAMING_SNAKE_CASE : Tuple = ['''This is a simple input 1''', '''This is a simple input 2''']
SCREAMING_SNAKE_CASE : List[Any] = ('''This is a simple input''', '''This is a pair''')
SCREAMING_SNAKE_CASE : Any = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' , )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
SCREAMING_SNAKE_CASE : List[Any] = '''This is a simple input'''
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''This is a simple input looooooooong''', '''This is a simple input''']
SCREAMING_SNAKE_CASE : Optional[int] = ('''This is a simple input''', '''This is a pair''')
SCREAMING_SNAKE_CASE : List[str] = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
SCREAMING_SNAKE_CASE : List[str] = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE : Dict = tokenizer(UpperCamelCase__ , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncate=UpperCamelCase__ , return_tensors='''np''' )
SCREAMING_SNAKE_CASE : Dict = tokenizer(*UpperCamelCase__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncate=UpperCamelCase__ , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = '''$$$'''
SCREAMING_SNAKE_CASE : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=UpperCamelCase__ , add_bos_token=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = '''This is a simple input'''
SCREAMING_SNAKE_CASE : Dict = ['''This is a simple input 1''', '''This is a simple input 2''']
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE : Dict = tokenizer(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = tokenizer(UpperCamelCase__ )
self.assertEqual(out_s.input_ids[0] , UpperCamelCase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE : Any = tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , UpperCamelCase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = [self.get_tokenizer(do_lower_case=UpperCamelCase__ , add_bos_token=UpperCamelCase__ )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
SCREAMING_SNAKE_CASE : List[Any] = '''Encode this.'''
SCREAMING_SNAKE_CASE : Dict = '''This one too please.'''
SCREAMING_SNAKE_CASE : Any = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
encoded_sequence += tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = tokenizer.encode_plus(
UpperCamelCase__ , UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = encoded_sequence_dict['''input_ids''']
SCREAMING_SNAKE_CASE : Dict = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : int = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(UpperCamelCase__ )
]
SCREAMING_SNAKE_CASE : str = [x for x in filtered_sequence if x is not None]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
@require_tokenizers
class lowercase__ ( unittest.TestCase):
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = '''A photo of a cat'''
SCREAMING_SNAKE_CASE : Any = tokenizer.encode(
UpperCamelCase__ , )
self.assertEqual(UpperCamelCase__ , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('''test_opt''' )
SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained('''./test_opt''' )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(
UpperCamelCase__ , )
self.assertEqual(UpperCamelCase__ , [2, 250, 1345, 9, 10, 4758] )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = '''A photo of a cat'''
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(
UpperCamelCase__ , )
# Same as above
self.assertEqual(UpperCamelCase__ , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = '''bos'''
SCREAMING_SNAKE_CASE : int = tokenizer.get_vocab()['''bos''']
SCREAMING_SNAKE_CASE : Any = '''A photo of a cat'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode(
UpperCamelCase__ , )
# We changed the bos token
self.assertEqual(UpperCamelCase__ , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('''./tok''' )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
UpperCamelCase__ , )
self.assertEqual(UpperCamelCase__ , [3_1957, 250, 1345, 9, 10, 4758] )
| 34
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = FunnelTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[Any] = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE : int = '''unwanted, running'''
return input_text, output_text
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
SCREAMING_SNAKE_CASE : int = tokenizer('''UNwant\u00E9d,running''' )
SCREAMING_SNAKE_CASE : Optional[Any] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
| 34
| 1
|
def A ( _lowercase = 1_000_000 ):
    # Sieve of Eratosthenes over the odds, then accumulate Euler's totient.
    SCREAMING_SNAKE_CASE : Dict = set(range(3 , _lowercase , 2 ) )
    primes.add(2 )
    for p in range(3 , _lowercase , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , _lowercase , p ) ) )
    SCREAMING_SNAKE_CASE : Optional[Any] = [float(n ) for n in range(_lowercase + 1 )]
    for p in primes:
        for n in range(p , _lowercase + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 34
|
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset
SCREAMING_SNAKE_CASE : Optional[Any] = process
SCREAMING_SNAKE_CASE : Union[str, Any] = params
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : List[str] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.dataset[i]
SCREAMING_SNAKE_CASE : Optional[int] = self.process(UpperCamelCase__ , **self.params )
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = loader
SCREAMING_SNAKE_CASE : List[Any] = infer
SCREAMING_SNAKE_CASE : int = params
if loader_batch_size == 1:
            # Let's spare some time by deactivating batch unrolling altogether
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = loader_batch_size
# Internal bookkeeping
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = None
def __len__( self : int ):
'''simple docstring'''
return len(self.loader )
def __iter__( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is a simple tensor, just fetch the slice
SCREAMING_SNAKE_CASE : Optional[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Convert ModelOutput to tuple first
SCREAMING_SNAKE_CASE : Dict = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
                # This can happen for optional data that gets passed around
SCREAMING_SNAKE_CASE : int = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                # Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                # Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
SCREAMING_SNAKE_CASE : Tuple = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
SCREAMING_SNAKE_CASE : Any = self._loader_batch_data.__class__(UpperCamelCase__ )
self._loader_batch_index += 1
return result
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
SCREAMING_SNAKE_CASE : Tuple = next(self.iterator )
SCREAMING_SNAKE_CASE : List[Any] = self.infer(UpperCamelCase__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Optional[int] = processed
else:
SCREAMING_SNAKE_CASE : int = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : int = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[Any] = observed_batch_size
# Setting internal index to unwrap the batch
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : int = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = iter(self.loader )
SCREAMING_SNAKE_CASE : List[Any] = None
return self
def __A ( self : List[str] ):
'''simple docstring'''
if self.subiterator is None:
SCREAMING_SNAKE_CASE : Dict = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
SCREAMING_SNAKE_CASE : Any = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
SCREAMING_SNAKE_CASE : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
SCREAMING_SNAKE_CASE : Union[str, Any] = next(self.subiterator )
return processed
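# Added illustration (hypothetical helper, not part of transformers): the
# try/except StopIteration loop above is a lazy equivalent of flattening a
# generator of generators:
def _flatten_generators(outer):
    """Sketch only: lazily flatten an iterable of iterables."""
    for inner in outer:
        yield from inner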
class lowercase__ ( UpperCamelCase_):
def __iter__( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Tuple = self.loader_batch_item()
SCREAMING_SNAKE_CASE : Any = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
while not is_last:
SCREAMING_SNAKE_CASE : Any = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Tuple = processed
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : List[str] = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[str] = observed_batch_size
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : str = 0
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Any = self.loader_batch_item()
SCREAMING_SNAKE_CASE : List[Any] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
else:
SCREAMING_SNAKE_CASE : int = processed
SCREAMING_SNAKE_CASE : List[str] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
return accumulator
class lowercase__ ( UpperCamelCase_):
def __init__( self : Optional[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = dataset
SCREAMING_SNAKE_CASE : Dict = key
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Dict , UpperCamelCase__ : Tuple ):
'''simple docstring'''
return self.dataset[i][self.key]
class lowercase__ ( UpperCamelCase_):
def __init__( self : List[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataset
SCREAMING_SNAKE_CASE : List[str] = keya
SCREAMING_SNAKE_CASE : Tuple = keya
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 34
| 1
|
def A ( _lowercase ):
    if not isinstance(_lowercase , int ):
        raise ValueError('''Input must be an integer''' )
    if _lowercase <= 0:
        raise ValueError('''Input must be positive''' )
    return sum(
        divisor for divisor in range(1 , _lowercase // 2 + 1 ) if _lowercase % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
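# Added usage sketch (relies on the corrected function A above): a perfect
# number equals the sum of its proper divisors, so A maps it to itself.
if __name__ == "__main__":
    assert A(6 ) == 6  # 1 + 2 + 3
    assert A(28 ) == 28  # 1 + 2 + 4 + 7 + 14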
| 34
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """deberta-v2"""
def __init__( self : Optional[Any] , UpperCamelCase__ : Any=12_8100 , UpperCamelCase__ : Optional[int]=1536 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : List[str]=24 , UpperCamelCase__ : Tuple=6144 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : List[Any]=1E-7 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=-1 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : str="gelu" , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = relative_attention
SCREAMING_SNAKE_CASE : Optional[Any] = max_relative_positions
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCamelCase__ ) == str:
SCREAMING_SNAKE_CASE : Optional[int] = [x.strip() for x in pos_att_type.lower().split('''|''' )]
SCREAMING_SNAKE_CASE : Any = pos_att_type
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''pooler_hidden_size''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pooler_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = pooler_hidden_act
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Dict , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
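# Added note (sketch): when type_vocab_size == 0, the ONNX config above drops
# token_type_ids from both the declared inputs and the generated dummy inputs.
# The backwards-compatibility branch in __init__ also accepts pos_att_type as
# a "|"-joined string, e.g. "p2c|c2p" is normalized to ["p2c", "c2p"].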
| 34
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """biogpt"""
def __init__( self : List[str] , UpperCamelCase__ : Union[str, Any]=4_2384 , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Optional[int]=24 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : List[str]=4096 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Union[str, Any]=1024 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : Tuple=1E-12 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : str=1 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Dict=2 , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = vocab_size
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : int = scale_embedding
SCREAMING_SNAKE_CASE : Tuple = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop
SCREAMING_SNAKE_CASE : List[str] = activation_dropout
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
| 34
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Any = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Any = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {int(_lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = '''std_conv''' if '''bit''' in _lowercase else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE : Optional[int] = BitConfig(
conv_layer=_lowercase , num_labels=1_000 , idalabel=_lowercase , labelaid=_lowercase , )
return config
def A ( _lowercase ):
if "stem.conv" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
SCREAMING_SNAKE_CASE : str = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''bit.encoder.''' + name
return name
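# Added examples (derived from the replacements above, not executed): the
# renaming maps timm parameter names onto the HF BiT layout, e.g.
#   "stem.conv.weight"      -> "bit.embedder.convolution.weight"
#   "head.fc.weight"        -> "classifier.1.weight"
#   "blocks.0.conv1.weight" -> "bit.encoder.layers.0.conv1.weight"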
def A ( ):
SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def A ( _lowercase , _lowercase , _lowercase=False ):
SCREAMING_SNAKE_CASE : List[Any] = get_config(_lowercase )
# load original model from timm
SCREAMING_SNAKE_CASE : Optional[Any] = create_model(_lowercase , pretrained=_lowercase )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE : Optional[int] = timm_model.state_dict()
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Dict = state_dict.pop(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = val.squeeze() if '''head''' in key else val
# load HuggingFace model
SCREAMING_SNAKE_CASE : str = BitForImageClassification(_lowercase )
model.eval()
model.load_state_dict(_lowercase )
# create image processor
SCREAMING_SNAKE_CASE : Optional[Any] = create_transform(**resolve_data_config({} , model=_lowercase ) )
SCREAMING_SNAKE_CASE : List[str] = transform.transforms
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE : Tuple = BitImageProcessor(
do_resize=_lowercase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowercase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = transform(_lowercase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : Optional[int] = processor(_lowercase , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_lowercase , _lowercase )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE : List[Any] = timm_model(_lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 34
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class lowercase__ ( unittest.TestCase):
def __init__( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : str=7 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : str=18 , UpperCamelCase__ : Dict=30 , UpperCamelCase__ : Any=400 , UpperCamelCase__ : str=True , UpperCamelCase__ : str=32 , UpperCamelCase__ : str=True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = image_size
SCREAMING_SNAKE_CASE : Any = min_resolution
SCREAMING_SNAKE_CASE : Union[str, Any] = max_resolution
SCREAMING_SNAKE_CASE : int = do_resize
SCREAMING_SNAKE_CASE : Dict = size_divisor
SCREAMING_SNAKE_CASE : Dict = do_rescale
def __A ( self : Tuple ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = GLPNImageProcessor if is_vision_available() else None
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = GLPNImageProcessingTester(self )
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''size_divisor''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''resample''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_rescale''' ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
SCREAMING_SNAKE_CASE : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
SCREAMING_SNAKE_CASE : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
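# Added note (sketch): GLPN resizing floors each spatial dimension to the
# nearest multiple of `size_divisor` (32 here), which is exactly what the
# shape % size_divisor == 0 assertions above check for PIL, numpy, and torch
# inputs alike.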
| 34
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__UpperCamelCase : str = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase : int = logging.getLogger()
def A ( ):
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('''-f''' )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
return args.f
def A ( _lowercase , _lowercase="eval" ):
SCREAMING_SNAKE_CASE : Dict = os.path.join(_lowercase , f"""{split}_results.json""" )
if os.path.exists(_lowercase ):
with open(_lowercase , '''r''' ) as f:
return json.load(_lowercase )
raise ValueError(f"""can't find {path}""" )
__UpperCamelCase : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase__ ( UpperCamelCase_):
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Tuple = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_glue.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : str = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_clm_flax.main()
SCREAMING_SNAKE_CASE : Dict = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_summarization_flax.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Dict = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_mlm_flax.main()
SCREAMING_SNAKE_CASE : List[Any] = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_ta_mlm_flax.main()
SCREAMING_SNAKE_CASE : Optional[int] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Any = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_ner.main()
SCREAMING_SNAKE_CASE : List[str] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_qa.main()
SCREAMING_SNAKE_CASE : str = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 34
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__UpperCamelCase : Union[str, Any] = None
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : str = '▁'
__UpperCamelCase : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : List[Any] = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
__UpperCamelCase : List[Any] = {
'google/pegasus-xsum': 512,
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = PegasusTokenizer
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Any]="<pad>" , UpperCamelCase__ : Tuple="</s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Dict="<mask_2>" , UpperCamelCase__ : List[str]="<mask_1>" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict=103 , **UpperCamelCase__ : int , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = offset
if additional_special_tokens is not None:
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise TypeError(
f"""additional_special_tokens should be of type {type(UpperCamelCase__ )}, but is"""
f""" {type(UpperCamelCase__ )}""" )
SCREAMING_SNAKE_CASE : Any = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill the remaining additional tokens with <unk_2>, ..., <unk_102> in case not all of them are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(UpperCamelCase__ ) , self.offset - 1 )
]
if len(set(UpperCamelCase__ ) ) != len(UpperCamelCase__ ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
SCREAMING_SNAKE_CASE : List[Any] = additional_special_tokens_extended
else:
SCREAMING_SNAKE_CASE : List[Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , pad_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , mask_token_sent=UpperCamelCase__ , offset=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Any = vocab_file
SCREAMING_SNAKE_CASE : Optional[int] = False if not self.vocab_file else True
def __A ( self : Optional[int] , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def __A ( self : List[Any] , UpperCamelCase__ : List , UpperCamelCase__ : Optional[List] = None , UpperCamelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(UpperCamelCase__ )
elif token_ids_a is None:
return self._special_token_mask(UpperCamelCase__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __A ( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __A ( self : str , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Dict = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
return (out_vocab_file,)
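# Added illustration (sketch, derived from build_inputs_with_special_tokens
# above): Pegasus only appends EOS, so for a single sequence
#   build_inputs_with_special_tokens([5, 6]) -> [5, 6, eos_token_id]
# and for pairs the two sequences are simply concatenated before EOS.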
| 34
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__UpperCamelCase : Dict = random.Random()
def A ( _lowercase , _lowercase=1.0 , _lowercase=None , _lowercase=None ):
if rng is None:
SCREAMING_SNAKE_CASE : Any = global_rng
SCREAMING_SNAKE_CASE : int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase__ ( unittest.TestCase):
def __init__( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=7 , UpperCamelCase__ : Any=400 , UpperCamelCase__ : List[str]=2000 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : str=30 , UpperCamelCase__ : Tuple=4_4100 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : str = min_seq_length
SCREAMING_SNAKE_CASE : Dict = max_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Optional[Any] = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[int] = feature_size
SCREAMING_SNAKE_CASE : Tuple = num_audio_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = chunk_length
SCREAMING_SNAKE_CASE : str = sampling_rate
def __A ( self : Optional[Any] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __A ( self : Tuple , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
def _flatten(UpperCamelCase__ : str ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = TvltFeatureExtractor
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = TvltFeatureExtractionTester(self )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''spectrogram_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''feature_size''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''num_audio_channels''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''hop_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''chunk_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''sampling_rate''' ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Optional[int] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : int = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Any = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(
UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=UpperCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Dict = np.asarray(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __A ( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Dict = ds.sort('''id''' ).select(range(UpperCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : int = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''pt''' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
SCREAMING_SNAKE_CASE : str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCamelCase__ , atol=1E-4 ) )
| 34
| 1
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class lowercase__ ( UpperCamelCase_):
def __init__( self : Tuple , UpperCamelCase__ : Distribution , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[Any]=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 1.0 if scale is None else scale
SCREAMING_SNAKE_CASE : Optional[int] = 0.0 if loc is None else loc
super().__init__(UpperCamelCase__ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=UpperCamelCase__ )] )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def __A ( self : List[Any] ):
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def __A ( self : int ):
'''simple docstring'''
return self.variance.sqrt()
class lowercase__ ( nn.Module):
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Callable[..., Tuple[torch.Tensor]] , **UpperCamelCase__ : int ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = args_dim
SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList([nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) for dim in args_dim.values()] )
SCREAMING_SNAKE_CASE : Dict = domain_map
def __A ( self : List[str] , UpperCamelCase__ : torch.Tensor ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [proj(UpperCamelCase__ ) for proj in self.proj]
return self.domain_map(*UpperCamelCase__ )
class lowercase__ ( nn.Module):
def __init__( self : str , UpperCamelCase__ : Tuple ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Union[str, Any] = function
def __A ( self : List[str] , UpperCamelCase__ : Optional[Any] , *UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return self.function(UpperCamelCase__ , *UpperCamelCase__ )
class lowercase__ :
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = 42
def __init__( self : str , UpperCamelCase__ : int = 1 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = dim
SCREAMING_SNAKE_CASE : List[str] = {k: dim * self.args_dim[k] for k in self.args_dim}
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*UpperCamelCase__ )
else:
return Independent(self.distribution_class(*UpperCamelCase__ ) , 1 )
def __A ( self : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._base_distribution(UpperCamelCase__ )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(UpperCamelCase__ , loc=UpperCamelCase__ , scale=UpperCamelCase__ , event_dim=self.event_dim )
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def __A ( self : str ):
'''simple docstring'''
return len(self.event_shape )
@property
def __A ( self : int ):
'''simple docstring'''
return 0.0
def __A ( self : Union[str, Any] , UpperCamelCase__ : int ):
'''simple docstring'''
return ParameterProjection(
in_features=UpperCamelCase__ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def __A ( self : List[str] , *UpperCamelCase__ : torch.Tensor ):
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def __A ( UpperCamelCase__ : torch.Tensor ):
'''simple docstring'''
return (x + torch.sqrt(torch.square(UpperCamelCase__ ) + 4.0 )) / 2.0
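# Added note (sketch): `squareplus` is a smooth map from R onto (0, inf),
#   squareplus(x) = (x + sqrt(x**2 + 4)) / 2,  with squareplus(0) == 1.
# The subclasses below use it to keep scale / df / total_count parameters
# strictly positive, similar in spirit to softplus.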
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = {"df": 1, "loc": 1, "scale": 1}
UpperCamelCase_ = StudentT
@classmethod
def __A ( cls : Optional[Any] , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = cls.squareplus(UpperCamelCase__ ).clamp_min(torch.finfo(scale.dtype ).eps )
SCREAMING_SNAKE_CASE : Any = 2.0 + cls.squareplus(UpperCamelCase__ )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = {"loc": 1, "scale": 1}
UpperCamelCase_ = Normal
@classmethod
def __A ( cls : str , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = cls.squareplus(UpperCamelCase__ ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = {"total_count": 1, "logits": 1}
UpperCamelCase_ = NegativeBinomial
@classmethod
def __A ( cls : Optional[int] , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = cls.squareplus(UpperCamelCase__ )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def __A ( self : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = distr_args
if self.dim == 1:
return self.distribution_class(total_count=UpperCamelCase__ , logits=UpperCamelCase__ )
else:
return Independent(self.distribution_class(total_count=UpperCamelCase__ , logits=UpperCamelCase__ ) , 1 )
def __A ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
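# Added note (sketch): a NegativeBinomial cannot be rescaled by an affine
# transform (its support is the non-negative integers), so the override above
# folds the scale into the logits instead; since the mean is
# total_count * exp(logits), adding log(scale) multiplies the mean by scale.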
| 34
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_):
UpperCamelCase_ = 1
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 1000 , UpperCamelCase__ : Optional[Union[np.ndarray, List[float]]] = None ):
'''simple docstring'''
self.set_timesteps(UpperCamelCase__ )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE : str = 1.0
# For now we only support F-PNDM, i.e. the Runge-Kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
SCREAMING_SNAKE_CASE : Tuple = 4
# running values
SCREAMING_SNAKE_CASE : int = []
def __A ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = num_inference_steps
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
SCREAMING_SNAKE_CASE : Tuple = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
SCREAMING_SNAKE_CASE : int = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sin(steps * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE : Dict = (1.0 - self.betas**2) ** 0.5
SCREAMING_SNAKE_CASE : Optional[Any] = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
SCREAMING_SNAKE_CASE : List[str] = timesteps.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = []
def __A ( self : Tuple , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
SCREAMING_SNAKE_CASE : Optional[int] = (self.timesteps == timestep).nonzero().item()
SCREAMING_SNAKE_CASE : Union[str, Any] = timestep_index + 1
SCREAMING_SNAKE_CASE : int = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase__ )
if len(self.ets ) == 1:
SCREAMING_SNAKE_CASE : Dict = self.ets[-1]
elif len(self.ets ) == 2:
SCREAMING_SNAKE_CASE : Optional[int] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
SCREAMING_SNAKE_CASE : str = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
SCREAMING_SNAKE_CASE : Optional[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
SCREAMING_SNAKE_CASE : Optional[int] = self._get_prev_sample(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : torch.FloatTensor , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return sample
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.alphas[timestep_index]
SCREAMING_SNAKE_CASE : List[str] = self.betas[timestep_index]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.alphas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Tuple = self.betas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Dict = (sample - sigma * ets) / max(UpperCamelCase__ , 1E-8 )
SCREAMING_SNAKE_CASE : Optional[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
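# Added note (sketch): the ets blending above uses the classical
# Adams-Bashforth linear multistep coefficients, e.g. the 4-step rule
#   (55*e_t - 59*e_{t-1} + 37*e_{t-2} - 9*e_{t-3}) / 24,
# falling back to the 3-, 2-, and 1-step rules while the history fills up.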
| 34
| 1
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__UpperCamelCase : Tuple = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class lowercase__ ( nn.Module):
def __init__( self : Tuple , UpperCamelCase__ : Dict ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Dict = torchvision.models.resnetaaa(pretrained=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = list(model.children() )[:-2]
SCREAMING_SNAKE_CASE : List[Any] = nn.Sequential(*UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def __A ( self : str , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.pool(self.model(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = torch.flatten(UpperCamelCase__ , start_dim=2 )
SCREAMING_SNAKE_CASE : str = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class lowercase__ ( UpperCamelCase_):
def __init__( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [json.loads(UpperCamelCase__ ) for l in open(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE : Dict = os.path.dirname(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = tokenizer
SCREAMING_SNAKE_CASE : Tuple = labels
SCREAMING_SNAKE_CASE : Tuple = len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = max_seq_length
SCREAMING_SNAKE_CASE : int = transforms
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : str , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = sentence[0], sentence[1:-1], sentence[-1]
SCREAMING_SNAKE_CASE : List[str] = sentence[: self.max_seq_length]
SCREAMING_SNAKE_CASE : Optional[Any] = torch.zeros(self.n_classes )
SCREAMING_SNAKE_CASE : List[str] = 1
SCREAMING_SNAKE_CASE : Dict = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' )
SCREAMING_SNAKE_CASE : Any = self.transforms(UpperCamelCase__ )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = Counter()
for row in self.data:
label_freqs.update(row['''label'''] )
return label_freqs
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = [len(row['''sentence'''] ) for row in batch]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = len(_lowercase ), max(_lowercase )
SCREAMING_SNAKE_CASE : List[str] = torch.zeros(_lowercase , _lowercase , dtype=torch.long )
SCREAMING_SNAKE_CASE : int = torch.zeros(_lowercase , _lowercase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_lowercase , _lowercase ) ):
SCREAMING_SNAKE_CASE : Optional[Any] = input_row['''sentence''']
SCREAMING_SNAKE_CASE : str = 1
SCREAMING_SNAKE_CASE : Dict = torch.stack([row['''image'''] for row in batch] )
SCREAMING_SNAKE_CASE : int = torch.stack([row['''label'''] for row in batch] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([row['''image_start_token'''] for row in batch] )
SCREAMING_SNAKE_CASE : Dict = torch.stack([row['''image_end_token'''] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
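# Added note (sketch): the collate function above left-aligns every sentence
# in a zero-initialized (batch, max_len) LongTensor and sets the matching
# attention-mask positions to 1, so padding keeps token id 0 and mask 0.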
def A ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def A ( ):
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
] )
| 34
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = IFPipeline
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __A ( self : Tuple ):
'''simple docstring'''
return self._get_dummy_components()
def __A ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : int=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : List[str] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __A ( self : Any ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __A ( self : List[Any] ):
'''simple docstring'''
self._test_save_load_local()
def __A ( self : List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE : Tuple = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def _start_torch_memory_measurement( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
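# Minimal sketch of the measurement pattern used by the slow tests above, assuming a
# CUDA device: resetting the peak counters first makes torch.cuda.max_memory_allocated()
# report only the region under test.
#
# _start_torch_memory_measurement()
# output = pipe(prompt_embeds=..., num_inference_steps=2, output_type='''np''')
# mem_bytes = torch.cuda.max_memory_allocated()
# assert mem_bytes < 13 * 10**9  # i.e. the run peaked below ~13 GB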
| 34
| 1
|
def perfect( number ):
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
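# Example: the first perfect numbers are 6 (= 1 + 2 + 3) and 28 (= 1 + 2 + 4 + 7 + 14),
# so perfect(28) returns True while perfect(27) returns False.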
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 34
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__UpperCamelCase : int = logging.get_logger(__name__)
def get_resize_output_image_size( input_image , output_size , keep_aspect_ratio , multiple ):
    def constraint_to_multiple_of( val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
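# Worked example: for a 480x640 (height x width) image with output_size=(384, 384),
# keep_aspect_ratio=True and multiple=32, scale_height = 0.8 and scale_width = 0.6;
# the width scale deviates more from 1, so the height is fitted and scale_width is
# also set to 0.8. That yields constraint_to_multiple_of(0.8 * 480) = 384 and
# constraint_to_multiple_of(0.8 * 640) = 512, i.e. the function returns (384, 512).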
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""pixel_values"""]
def __init__( self : int , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'''height''': 384, '''width''': 384}
SCREAMING_SNAKE_CASE : Any = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : str = keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(
UpperCamelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase__ , multiple=UpperCamelCase__ , )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Dict = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Dict = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Any = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Tuple] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[int] = []
for idx in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
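# Hedged usage sketch (names follow this file's anonymized definitions; the class
# above is a DPT-style image processor). Assuming keep_aspect_ratio and
# ensure_multiple_of are honored by the resize step, a 480x640 input maps to
# 384x512 (see the worked example under get_resize_output_image_size above):
#
# import numpy as np
#
# processor = lowercase__(size={'''height''': 384, '''width''': 384}, keep_aspect_ratio=True, ensure_multiple_of=32)
# image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # synthetic RGB image
# batch = processor(images=image, return_tensors='''pt''')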
| 34
| 1
|
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = PhobertTokenizer
UpperCamelCase_ = False
def __A ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Dict = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
SCREAMING_SNAKE_CASE : int = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE : Any = ['''#version: 0.2''', '''l à</w>''']
SCREAMING_SNAKE_CASE : List[Any] = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase__ ) )
def __A ( self : List[Any] , **UpperCamelCase__ : str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''Tôi là VinAI Research'''
SCREAMING_SNAKE_CASE : Optional[int] = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE : str = '''Tôi là VinAI Research'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize(UpperCamelCase__ )
print(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : List[str] = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
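# Note on the '''@@''' marker: following the subword-nmt BPE convention used by
# PhoBERT, a trailing '''@@''' means the word continues in the next token, so a
# detokenizer only needs to strip the markers and re-join:
#
# def detokenize(tokens):
#     return "".join(t[:-2] if t.endswith("@@") else t + " " for t in tokens).strip()
#
# detokenize("T@@ ô@@ i l@@ à".split())  # 'Tôi là'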
| 34
|
import random
def _partition( data , pivot ):
    less , equal , greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater
def quick_select( items , index ):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items ) or index < 0:
        return None
    pivot = items[random.randint(0 , len(items ) - 1 )]
    count = 0
    less , equal , greater = _partition(items , pivot )
    count = len(equal )
    m = len(less )
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(less , index )
    # must be in larger
    else:
        return quick_select(greater , index - (m + count) )
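if __name__ == "__main__":
    # quick_select returns the value that would sit at position `index` in the
    # sorted list, so the median of an odd-length list lives at len(items) // 2.
    print(quick_select([2, 4, 5, 7, 899, 54, 32] , 3 ))  # 7, the median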
| 34
| 1
|
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig( SageMakerConfig ):
UpperCamelCase_ = ComputeEnvironment.AMAZON_SAGEMAKER
UpperCamelCase_ = True
UpperCamelCase_ = """ml.p3.2xlarge"""
UpperCamelCase_ = """accelerate_sagemaker_execution_role"""
UpperCamelCase_ = """hf-sm"""
UpperCamelCase_ = """us-east-1"""
UpperCamelCase_ = 1
UpperCamelCase_ = """accelerate-sagemaker-1"""
UpperCamelCase_ = """1.6"""
UpperCamelCase_ = """4.4"""
UpperCamelCase_ = """train.py"""
UpperCamelCase_ = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
UpperCamelCase_ = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class lowercase__ ( unittest.TestCase):
def __A ( self : List[Any] ):
'''simple docstring'''
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        assert isinstance(converted_args['''model_name_or_path'''] , str )
        assert isinstance(converted_args['''do_train'''] , bool )
        assert isinstance(converted_args['''epochs'''] , int )
        assert isinstance(converted_args['''learning_rate'''] , float )
        assert isinstance(converted_args['''max_steps'''] , float )
        with pytest.raises(ValueError ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
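# For reference (inferred from the type assertions above, not from running the
# parser): the successful arg list is expected to convert to
# {'''model_name_or_path''': '''bert''', '''do_train''': False, '''epochs''': 3,
#  '''learning_rate''': 5e-05, '''max_steps''': 50.5},
# while the failing list mixes bare flags (--do_test, --do_predict) with valued
# arguments, which is what triggers the exception asserted in the test.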
| 34
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
__UpperCamelCase : List[str] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """esm"""
def __init__( self : Tuple , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Any=768 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : str=12 , UpperCamelCase__ : Optional[int]=3072 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Union[str, Any]=1026 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = position_embedding_type
SCREAMING_SNAKE_CASE : Any = use_cache
SCREAMING_SNAKE_CASE : Dict = emb_layer_norm_before
SCREAMING_SNAKE_CASE : List[str] = token_dropout
SCREAMING_SNAKE_CASE : List[Any] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
SCREAMING_SNAKE_CASE : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = EsmFoldConfig(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
SCREAMING_SNAKE_CASE : Optional[int] = get_default_vocab_list()
else:
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_list
else:
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.esmfold_config.to_dict()
return output
@dataclass
class EsmFoldConfig :
UpperCamelCase_ = None
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = 0
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Optional[int] ):
'''simple docstring'''
if self.trunk is None:
SCREAMING_SNAKE_CASE : Optional[Any] = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = TrunkConfig(**self.trunk )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = asdict(self )
SCREAMING_SNAKE_CASE : Tuple = self.trunk.to_dict()
return output
@dataclass
class TrunkConfig :
UpperCamelCase_ = 48
UpperCamelCase_ = 1_024
UpperCamelCase_ = 128
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = False
UpperCamelCase_ = 4
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Any ):
'''simple docstring'''
if self.structure_module is None:
SCREAMING_SNAKE_CASE : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
SCREAMING_SNAKE_CASE : Dict = self.sequence_state_dim // self.sequence_head_width
SCREAMING_SNAKE_CASE : Tuple = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
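    # Worked example with the defaults above: sequence_state_dim=1024 with
    # sequence_head_width=32 gives 32 sequence heads (32 * 32 == 1024), and
    # pairwise_state_dim=128 with pairwise_head_width=32 gives 4 pairwise heads
    # (4 * 32 == 128); 128 is even and dropout=0 < 0.4, so all checks above pass.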
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = asdict(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.structure_module.to_dict()
return output
@dataclass
class StructureModuleConfig :
UpperCamelCase_ = 384
UpperCamelCase_ = 128
UpperCamelCase_ = 16
UpperCamelCase_ = 128
UpperCamelCase_ = 12
UpperCamelCase_ = 4
UpperCamelCase_ = 8
UpperCamelCase_ = 0.1
UpperCamelCase_ = 8
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 7
UpperCamelCase_ = 10
UpperCamelCase_ = 1E-8
UpperCamelCase_ = 1E5
def __A ( self : Dict ):
'''simple docstring'''
return asdict(self )
def get_default_vocab_list( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 34
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : int = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """cvt"""
def __init__( self : Optional[int] , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[int]=[7, 3, 3] , UpperCamelCase__ : Dict=[4, 2, 2] , UpperCamelCase__ : str=[2, 1, 1] , UpperCamelCase__ : Optional[Any]=[64, 192, 384] , UpperCamelCase__ : Union[str, Any]=[1, 3, 6] , UpperCamelCase__ : Optional[Any]=[1, 2, 10] , UpperCamelCase__ : Any=[4.0, 4.0, 4.0] , UpperCamelCase__ : Dict=[0.0, 0.0, 0.0] , UpperCamelCase__ : int=[0.0, 0.0, 0.0] , UpperCamelCase__ : Dict=[0.0, 0.0, 0.1] , UpperCamelCase__ : Any=[True, True, True] , UpperCamelCase__ : Union[str, Any]=[False, False, True] , UpperCamelCase__ : Optional[int]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase__ : List[Any]=[3, 3, 3] , UpperCamelCase__ : List[str]=[1, 1, 1] , UpperCamelCase__ : Optional[Any]=[2, 2, 2] , UpperCamelCase__ : List[Any]=[1, 1, 1] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Optional[int]=1E-12 , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Tuple = patch_sizes
SCREAMING_SNAKE_CASE : Optional[int] = patch_stride
SCREAMING_SNAKE_CASE : Tuple = patch_padding
SCREAMING_SNAKE_CASE : str = embed_dim
SCREAMING_SNAKE_CASE : List[Any] = num_heads
SCREAMING_SNAKE_CASE : str = depth
SCREAMING_SNAKE_CASE : List[Any] = mlp_ratio
SCREAMING_SNAKE_CASE : Any = attention_drop_rate
SCREAMING_SNAKE_CASE : List[Any] = drop_rate
SCREAMING_SNAKE_CASE : Optional[int] = drop_path_rate
SCREAMING_SNAKE_CASE : Tuple = qkv_bias
SCREAMING_SNAKE_CASE : Dict = cls_token
SCREAMING_SNAKE_CASE : List[str] = qkv_projection_method
SCREAMING_SNAKE_CASE : Optional[Any] = kernel_qkv
SCREAMING_SNAKE_CASE : str = padding_kv
SCREAMING_SNAKE_CASE : Union[str, Any] = stride_kv
SCREAMING_SNAKE_CASE : str = padding_q
SCREAMING_SNAKE_CASE : List[Any] = stride_q
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
| 34
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=UpperCamelCase__ , )
assert hasattr(self , '''env''' )
def __A ( self : str , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
SCREAMING_SNAKE_CASE : Any = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCamelCase__ , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version='''py36''' , )
def __A ( self : Optional[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __A ( self : Tuple , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.create_estimator(UpperCamelCase__ )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE : List[str] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCamelCase__ )
| 34
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """swinv2"""
UpperCamelCase_ = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[Any] , UpperCamelCase__ : str=224 , UpperCamelCase__ : int=4 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Any=96 , UpperCamelCase__ : Optional[int]=[2, 2, 6, 2] , UpperCamelCase__ : str=[3, 6, 12, 24] , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : Optional[Any]=4.0 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : List[str]=1E-5 , UpperCamelCase__ : Optional[Any]=32 , **UpperCamelCase__ : List[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim
SCREAMING_SNAKE_CASE : Optional[Any] = depths
SCREAMING_SNAKE_CASE : Any = len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = num_heads
SCREAMING_SNAKE_CASE : Tuple = window_size
SCREAMING_SNAKE_CASE : Dict = mlp_ratio
SCREAMING_SNAKE_CASE : Dict = qkv_bias
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = drop_path_rate
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : str = int(embed_dim * 2 ** (len(UpperCamelCase__ ) - 1) )
SCREAMING_SNAKE_CASE : Optional[Any] = (0, 0, 0, 0)
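        # Worked example with the defaults above: embed_dim=96 and depths=[2, 2, 6, 2]
        # (four stages) give hidden_size = 96 * 2 ** (4 - 1) = 768, the channel
        # dimension after the last stage.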
| 34
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 34
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key( state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict ):
    prefix = ''''''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""decoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[f"""decoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[f"""decoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[f"""decoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[f"""decoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[f"""decoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
        in_proj_bias_cross_attn = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"""decoder.layers.{i}.encoder_attn.q_proj.weight"""] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"""decoder.layers.{i}.encoder_attn.q_proj.bias"""] = in_proj_bias_cross_attn[:256]
        state_dict[f"""decoder.layers.{i}.encoder_attn.k_proj.weight"""] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"""decoder.layers.{i}.encoder_attn.k_proj.bias"""] = in_proj_bias_cross_attn[256:512]
        state_dict[f"""decoder.layers.{i}.encoder_attn.v_proj.weight"""] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"""decoder.layers.{i}.encoder_attn.v_proj.bias"""] = in_proj_bias_cross_attn[-256:]
def resize( image , checkpoint_url ):
    width , height = image.size
    current_max_size = max(width , height )
    target_max_size = 800 if '''detection''' in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
def normalize( image ):
    image = F.to_tensor(image )
    image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
    return image
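# Worked example of the two helpers above for a detection checkpoint: a 1700x2200
# page has current_max_size = 2200 and target_max_size = 800, so scale = 800/2200
# (about 0.364) and the image is resized to roughly 618x800 before normalize()
# converts it to a float tensor with the ImageNet mean/std the backbone expects.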
@torch.no_grad()
def convert_table_transformer_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    logger.info('''Converting model...''' )
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''model.'''
    for key in state_dict.copy().keys():
        if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
            val = state_dict.pop(key )
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: '''table''', 1: '''table rotated'''}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: '''table''',
            1: '''table column''',
            2: '''table row''',
            3: '''table column header''',
            4: '''table projected row header''',
            5: '''table spanning cell''',
        }
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    image_processor = DetrImageProcessor(
        format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1_000 )
    model = TableTransformerForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion
    file_name = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
    file_path = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=file_name )
    image = Image.open(file_path ).convert('''RGB''' )
    pixel_values = normalize(resize(image , checkpoint_url ) ).unsqueeze(0 )
    outputs = model(pixel_values )
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model to HF hub
        logger.info('''Pushing model to the hub...''' )
        model_name = (
            '''microsoft/table-transformer-detection'''
            if '''detection''' in checkpoint_url
            else '''microsoft/table-transformer-structure-recognition'''
        )
        model.push_to_hub(model_name )
        image_processor.push_to_hub(model_name )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 34
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class TaTokenizerFast( PreTrainedTokenizerFast ):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = TaTokenizer
UpperCamelCase_ = []
def __init__( self : str , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]=100 , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : str , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE : List[str] = [f"""<extra_id_{i}>""" for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
SCREAMING_SNAKE_CASE : int = len(set(filter(lambda UpperCamelCase__ : bool('''extra_id_''' in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = vocab_file
SCREAMING_SNAKE_CASE : int = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE : str = extra_ids
@staticmethod
def __A ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
SCREAMING_SNAKE_CASE : List[str] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase__ , )
return max_model_length
def __A ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Any = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def __A ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
SCREAMING_SNAKE_CASE : Tuple = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __A ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(r'''<extra_id_\d+>''' , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self : List[Any] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
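# --- Hedged usage sketch (added; not part of the original tokenizer file) ---
# Assumes the public `transformers` T5TokenizerFast API and network access to the
# "t5-small" checkpoint; it shows how the `extra_ids` sentinel tokens handled
# above surface in the vocabulary.
if __name__ == "__main__":
    from transformers import T5TokenizerFast

    tok = T5TokenizerFast.from_pretrained("t5-small")
    # The sentinels are registered as additional special tokens ...
    print(tok.additional_special_tokens[:3])
    # ... and occupy the highest ids of the vocabulary (32099 for <extra_id_0>).
    print(tok.convert_tokens_to_ids("<extra_id_0>"))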
| 34
| 1
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A ( ):
SCREAMING_SNAKE_CASE : Union[str, Any] = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch '''
'''helper utility that will spawn up '''
'''multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=_lowercase , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=_lowercase , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=_lowercase )
return parser.parse_args()
def A ( ):
SCREAMING_SNAKE_CASE : List[Any] = parse_args()
# Import training_script as a module.
SCREAMING_SNAKE_CASE : Optional[int] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
SCREAMING_SNAKE_CASE : Tuple = script_fpath.stem
SCREAMING_SNAKE_CASE : List[Any] = importlib.import_module(_lowercase )
# Patch sys.argv
SCREAMING_SNAKE_CASE : str = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
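# --- Hedged sketch (added; not part of the original launcher) ---
# A self-contained illustration of the launcher's core mechanism: write a toy
# script to disk, import it as a module, patch sys.argv so the module's own
# argument parsing sees the forwarded arguments, then call its entry point.
# Call _demo_patched_import() manually to try it; nothing here runs on import.
def _demo_patched_import():
    import tempfile
    import textwrap

    with tempfile.TemporaryDirectory() as tmp:
        script = Path(tmp) / "toy_script.py"
        script.write_text(
            textwrap.dedent(
                """
                import sys

                def _mp_fn(index):
                    print("args seen by the script:", sys.argv[1:])
                """
            )
        )
        sys.path.append(str(script.parent.resolve()))
        mod = importlib.import_module(script.stem)
        sys.argv = [str(script), "--epochs", "3"]  # what the module's argparse would see
        mod._mp_fn(0)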
if __name__ == "__main__":
main()
| 34
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : str = False
class lowercase__ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = VersatileDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe.dual_guided(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe.text_to_image(
prompt=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.image_variation(UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 34
| 1
|
import string
def A ( _lowercase ):
for key in range(len(string.ascii_uppercase ) ):
SCREAMING_SNAKE_CASE : Optional[int] = ''''''
for symbol in message:
if symbol in string.ascii_uppercase:
                SCREAMING_SNAKE_CASE : List[Any] = string.ascii_uppercase.find(symbol )
SCREAMING_SNAKE_CASE : Optional[int] = num - key
if num < 0:
SCREAMING_SNAKE_CASE : Optional[Any] = num + len(string.ascii_uppercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = translated + string.ascii_uppercase[num]
else:
SCREAMING_SNAKE_CASE : Optional[int] = translated + symbol
print(f"""Decryption using Key #{key}: {translated}""" )
def A ( ):
SCREAMING_SNAKE_CASE : Optional[int] = input('''Encrypted message: ''' )
SCREAMING_SNAKE_CASE : str = message.upper()
decrypt(_lowercase )
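# --- Hedged self-check (added; not part of the original file) ---
# A tiny verification of the shift arithmetic above: "HELLO" encrypted with
# key 3 is "KHOOR", so the brute force must recover "HELLO" at key 3.
# Call _demo_bruteforce() manually to try it.
def _demo_bruteforce():
    ciphertext = "KHOOR"  # "HELLO" shifted by 3
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in ciphertext:
            num = (string.ascii_uppercase.find(symbol) - key) % len(string.ascii_uppercase)
            translated += string.ascii_uppercase[num]
        if translated == "HELLO":
            print(f"key {key} recovers the plaintext: {translated}")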
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 34
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def A ( _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = analyze_text(_lowercase )
SCREAMING_SNAKE_CASE : Any = list(''' ''' + ascii_lowercase )
    # total number of characters, used to normalize counts into probabilities.
    SCREAMING_SNAKE_CASE : Tuple = sum(single_char_strings.values() )
    # entropy accumulator for one-character strings
    SCREAMING_SNAKE_CASE : Tuple = 0
    # for each character in the alphabet, add its entropy contribution if it occurs in the text
for ch in my_alphas:
if ch in single_char_strings:
SCREAMING_SNAKE_CASE : Tuple = single_char_strings[ch]
SCREAMING_SNAKE_CASE : List[str] = my_str / all_sum
            my_fir_sum += prob * math.loga(prob ) # entropy formula.
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
    # total number of two-character sequences.
    SCREAMING_SNAKE_CASE : Optional[Any] = sum(two_char_strings.values() )
    SCREAMING_SNAKE_CASE : List[str] = 0
    # for each two-character sequence, accumulate its entropy contribution if it occurs.
for cha in my_alphas:
        for chb in my_alphas:
            SCREAMING_SNAKE_CASE : Union[str, Any] = cha + chb
            if sequence in two_char_strings:
                SCREAMING_SNAKE_CASE : Any = two_char_strings[sequence]
                SCREAMING_SNAKE_CASE : Dict = int(my_str ) / all_sum
                my_sec_sum += prob * math.loga(prob )
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = Counter() # type: ignore
SCREAMING_SNAKE_CASE : Any = Counter() # type: ignore
single_char_strings[text[-1]] += 1
    # prepend a space so that the first character also forms a bigram.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
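# --- Hedged sanity check (added; assumes the analyze_text helper above) ---
# For the text "abab", the single-character counts are {'a': 2, 'b': 2}: each
# probability is 1/2, so the first-order entropy is
# -(0.5 * log2(0.5) + 0.5 * log2(0.5)) = 1.0 bit.
def _demo_entropy():
    singles, pairs = analyze_text("abab")
    total = sum(singles.values())
    h1 = -sum((count / total) * math.log2(count / total) for count in singles.values())
    print(f"H1('abab') = {h1:.1f} bits")  # 1.0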
def A ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 34
| 1
|
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
__UpperCamelCase : Optional[int] = pytest.mark.integration
__UpperCamelCase : int = {'comet'}
__UpperCamelCase : Optional[int] = importlib.util.find_spec('fairseq') is not None
__UpperCamelCase : Optional[Any] = {'code_eval'}
__UpperCamelCase : List[str] = os.name == 'nt'
__UpperCamelCase : Optional[Any] = {'bertscore', 'frugalscore', 'perplexity'}
__UpperCamelCase : Dict = importlib.util.find_spec('transformers') is not None
def A ( _lowercase ):
@wraps(_lowercase )
def wrapper(self , _lowercase ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('''"test requires Fairseq"''' )
else:
test_case(self , _lowercase )
return wrapper
def A ( _lowercase ):
@wraps(_lowercase )
def wrapper(self , _lowercase ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('''"test requires transformers"''' )
else:
test_case(self , _lowercase )
return wrapper
def A ( _lowercase ):
@wraps(_lowercase )
def wrapper(self , _lowercase ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('''"test not supported on Windows"''' )
else:
test_case(self , _lowercase )
return wrapper
def A ( ):
SCREAMING_SNAKE_CASE : List[Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
@local
class lowercase__ ( parameterized.TestCase):
UpperCamelCase_ = {}
UpperCamelCase_ = None
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
def __A ( self : Dict , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = '''[...]'''
SCREAMING_SNAKE_CASE : List[Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('''metrics''' , UpperCamelCase__ ) ).module_path )
SCREAMING_SNAKE_CASE : List[str] = datasets.load.import_main_class(metric_module.__name__ , dataset=UpperCamelCase__ )
# check parameters
SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(UpperCamelCase__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
SCREAMING_SNAKE_CASE : Tuple = doctest.testmod(UpperCamelCase__ , verbose=UpperCamelCase__ , raise_on_error=UpperCamelCase__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def __A ( self : Optional[Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = '''[...]'''
SCREAMING_SNAKE_CASE : Dict = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('''metrics''' , UpperCamelCase__ ) ).module_path )
# run doctest
with self.use_local_metrics():
SCREAMING_SNAKE_CASE : Optional[Any] = doctest.testmod(UpperCamelCase__ , verbose=UpperCamelCase__ , raise_on_error=UpperCamelCase__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def __A ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](UpperCamelCase__ ):
yield
else:
yield
@contextmanager
def __A ( self : Optional[Any] ):
'''simple docstring'''
def load_local_metric(UpperCamelCase__ : List[Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : Any ):
return load_metric(os.path.join('''metrics''' , UpperCamelCase__ ) , *UpperCamelCase__ , **UpperCamelCase__ )
with patch('''datasets.load_metric''' ) as mock_load_metric:
SCREAMING_SNAKE_CASE : Optional[Any] = load_local_metric
yield
@classmethod
def __A ( cls : str , UpperCamelCase__ : Dict ):
'''simple docstring'''
def wrapper(UpperCamelCase__ : Dict ):
SCREAMING_SNAKE_CASE : Optional[Any] = contextmanager(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def A ( _lowercase ):
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('''sv''' , '''''' , '''''' ) # handle pytest cli flags
class lowercase__ ( UpperCamelCase_):
def __A ( self : Optional[int] , UpperCamelCase__ : str ):
'''simple docstring'''
assert len(input_dict['''input_ids'''] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor:
SCREAMING_SNAKE_CASE : List[str] = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def A ( _lowercase ):
import torch
def bert_cos_score_idf(_lowercase , _lowercase , *_lowercase , **_lowercase ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(_lowercase ) )
    # mock get_model which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('''bert_score.scorer.get_model''' ), patch(
'''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf:
SCREAMING_SNAKE_CASE : Any = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def A ( _lowercase ):
def load_from_checkpoint(_lowercase ):
class lowercase__ :
def __A ( self : Any , UpperCamelCase__ : Optional[int] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : int ):
'''simple docstring'''
assert len(UpperCamelCase__ ) == 2
SCREAMING_SNAKE_CASE : str = [0.19, 0.92]
return scores, sum(UpperCamelCase__ ) / len(UpperCamelCase__ )
return Model()
    # mock download_model which is supposed to download a comet model
    # mock load_from_checkpoint which is supposed to load the comet model checkpoint
with patch('''comet.download_model''' ) as mock_download_model:
SCREAMING_SNAKE_CASE : int = None
with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
SCREAMING_SNAKE_CASE : Any = load_from_checkpoint
yield
def A ( ):
SCREAMING_SNAKE_CASE : int = load_metric(os.path.join('''metrics''' , '''seqeval''' ) )
SCREAMING_SNAKE_CASE : List[Any] = '''ERROR'''
SCREAMING_SNAKE_CASE : Optional[Any] = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(_lowercase , match=re.escape(_lowercase ) ):
metric.compute(predictions=[] , references=[] , scheme=_lowercase )
| 34
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Tuple = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34
| 1
|
import pprint
import requests
__UpperCamelCase : int = 'https://zenquotes.io/api'
def A ( ):
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def A ( ):
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = random_quotes()
pprint.pprint(response)
| 34
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : Tuple = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['MaskFormerFeatureExtractor']
__UpperCamelCase : List[Any] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__UpperCamelCase : Union[str, Any] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 34
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCamelCase : Dict = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = True
while ask_again:
SCREAMING_SNAKE_CASE : Optional[Any] = input(_lowercase )
try:
if default is not None and len(_lowercase ) == 0:
return default
return convert_value(_lowercase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_lowercase )
def A ( _lowercase , _lowercase=[] , _lowercase=None , _lowercase=0 ):
SCREAMING_SNAKE_CASE : Dict = BulletMenu(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : str = menu.run(default_choice=_lowercase )
return convert_value(_lowercase ) if convert_value is not None else result
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = int(_lowercase )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def A ( _lowercase ):
return {"yes": True, "no": False}[value.lower()]
class lowercase__ ( argparse.RawDescriptionHelpFormatter):
def __A ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = super()._format_usage(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
| 34
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : str = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 34
|
from __future__ import annotations
from typing import Any
class lowercase__ ( UpperCamelCase_):
pass
class lowercase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = data
SCREAMING_SNAKE_CASE : Node | None = None
def __iter__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self
SCREAMING_SNAKE_CASE : Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase__ )
yield node.data
SCREAMING_SNAKE_CASE : Dict = node.next_node
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
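# --- Hedged alternative (added; not part of the original implementation) ---
# The `visited` list above costs O(n) memory and O(n^2) time; Floyd's classic
# tortoise-and-hare check below detects a loop in O(n) time and O(1) memory.
# It is a sketch against the same Node shape (data / next_node).
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # advance one step
        fast = fast.next_node.next_node  # advance two steps
        if slow is fast:                 # the pointers can only meet inside a cycle
            return True
    return False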
if __name__ == "__main__":
__UpperCamelCase : List[Any] = Node(1)
__UpperCamelCase : str = Node(2)
__UpperCamelCase : Dict = Node(3)
__UpperCamelCase : List[Any] = Node(4)
print(root_node.has_loop) # False
__UpperCamelCase : int = root_node.next_node
print(root_node.has_loop) # True
__UpperCamelCase : Union[str, Any] = Node(5)
__UpperCamelCase : Union[str, Any] = Node(6)
__UpperCamelCase : List[Any] = Node(5)
__UpperCamelCase : List[str] = Node(6)
print(root_node.has_loop) # False
__UpperCamelCase : List[Any] = Node(1)
print(root_node.has_loop) # False
| 34
| 1
|
from __future__ import annotations
from statistics import mean
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : int = [0] * no_of_processes
SCREAMING_SNAKE_CASE : Optional[Any] = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
for i in range(_lowercase ):
SCREAMING_SNAKE_CASE : List[str] = burst_time[i]
SCREAMING_SNAKE_CASE : list[int] = []
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
    # While processes remain uncompleted,
    # a process whose arrival time has passed
    # and which still has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
while completed != no_of_processes:
SCREAMING_SNAKE_CASE : List[Any] = []
SCREAMING_SNAKE_CASE : Optional[int] = -1
for i in range(_lowercase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_lowercase )
if len(_lowercase ) > 0:
SCREAMING_SNAKE_CASE : Optional[Any] = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
SCREAMING_SNAKE_CASE : str = i
total_time += burst_time[target_process]
completed += 1
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : str = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = [0] * no_of_processes
for i in range(_lowercase ):
SCREAMING_SNAKE_CASE : Any = burst_time[i] + waiting_time[i]
return turn_around_time
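# --- Hedged second scenario (added; assumes the two helpers above) ---
# Note that the scheduler above runs the selected job to completion, i.e. it
# behaves as non-preemptive shortest-job-first. With staggered arrivals and
# burst times [8, 4, 9, 5] at t = 0, 1, 2, 3, the completion order is
# P1, P2, P4, P3. Call _demo_staggered_arrivals() manually to try it.
def _demo_staggered_arrivals():
    burst = [8, 4, 9, 5]
    arrival = [0, 1, 2, 3]
    waiting = calculate_waitingtime(arrival, burst, 4)
    turnaround = calculate_turnaroundtime(burst, 4, waiting)
    print("waiting:", waiting)        # [0, 7, 15, 9]
    print("turnaround:", turnaround)  # [8, 11, 24, 14]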
if __name__ == "__main__":
print('[TEST CASE 01]')
__UpperCamelCase : List[Any] = 4
__UpperCamelCase : Any = [2, 5, 3, 7]
__UpperCamelCase : Union[str, Any] = [0, 0, 0, 0]
__UpperCamelCase : List[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__UpperCamelCase : Optional[int] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 34
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=64 , UpperCamelCase__ : Optional[Any]=4_8000 , UpperCamelCase__ : Tuple=480 , UpperCamelCase__ : Union[str, Any]=10 , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : int=False , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 1_4000 , UpperCamelCase__ : int = None , UpperCamelCase__ : str = "fusion" , UpperCamelCase__ : str = "repeatpad" , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = top_db
SCREAMING_SNAKE_CASE : Union[str, Any] = truncation
SCREAMING_SNAKE_CASE : str = padding
SCREAMING_SNAKE_CASE : List[Any] = fft_window_size
SCREAMING_SNAKE_CASE : Tuple = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE : List[str] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = max_length_s
SCREAMING_SNAKE_CASE : Tuple = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE : List[Any] = sampling_rate
SCREAMING_SNAKE_CASE : List[str] = frequency_min
SCREAMING_SNAKE_CASE : Any = frequency_max
SCREAMING_SNAKE_CASE : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm=UpperCamelCase__ , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm='''slaney''' , mel_scale='''slaney''' , )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __A ( self : Optional[int] , UpperCamelCase__ : np.array , UpperCamelCase__ : Optional[np.array] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = spectrogram(
UpperCamelCase__ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase__ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : Any = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE : List[Any] = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE : int = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE : Optional[int] = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE : Optional[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE : Tuple = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE : str = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase__ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __A ( self : Dict , UpperCamelCase__ : np.array , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ ) - max_length
SCREAMING_SNAKE_CASE : Dict = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE : List[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = False
else:
SCREAMING_SNAKE_CASE : str = self._random_mel_fusion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
SCREAMING_SNAKE_CASE : List[str] = False
        # `repeat` is an additional padding mode: the audio is tiled before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE : Tuple = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Any = np.stack(np.tile(UpperCamelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE : List[Any] = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.stack(np.tile(UpperCamelCase__ , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.pad(UpperCamelCase__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE : List[Any] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE : List[str] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : str = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE : List[str] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : List[str] = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Any = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ):
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(UpperCamelCase__ , dtype=np.floataa )
elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(UpperCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE : int = [
self._get_input_mel(UpperCamelCase__ , max_length if max_length else self.nb_max_samples , UpperCamelCase__ , UpperCamelCase__ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase__ )
is_longer.append(UpperCamelCase__ )
if truncation == "fusion" and sum(UpperCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = True
if isinstance(input_mel[0] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE : Optional[Any] = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE : int = BatchFeature(UpperCamelCase__ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : int = input_features.convert_to_tensors(UpperCamelCase__ )
return input_features
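# --- Hedged usage sketch (added; not part of the original file) ---
# Assumes the public `transformers` ClapFeatureExtractor API, which this file
# mirrors; it feeds 10 s of random mono audio at 48 kHz and inspects the
# fused mel input.
if __name__ == "__main__":
    from transformers import ClapFeatureExtractor

    fe = ClapFeatureExtractor()
    waveform = np.random.randn(48_000 * 10).astype(np.float32)
    features = fe(waveform, sampling_rate=48_000, return_tensors="np")
    # the default "fusion" truncation stacks 4 mel views per example:
    # (batch, 4, frames, mel bins)
    print(features["input_features"].shape)
    print(features["is_longer"])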
| 34
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """timm_backbone"""
def __init__( self : int , UpperCamelCase__ : Any=None , UpperCamelCase__ : str=3 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : List[str]=None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = backbone
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Dict = features_only
SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : int = out_indices if out_indices is not None else (-1,)
| 34
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """layoutlmv3"""
def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any]=5_0265 , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Tuple=3072 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=512 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[Any]=1E-5 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : int=0 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[str]=1024 , UpperCamelCase__ : str=128 , UpperCamelCase__ : str=128 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Optional[Any]=64 , UpperCamelCase__ : Dict=256 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Dict=224 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
vocab_size=UpperCamelCase__ , hidden_size=UpperCamelCase__ , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , intermediate_size=UpperCamelCase__ , hidden_act=UpperCamelCase__ , hidden_dropout_prob=UpperCamelCase__ , attention_probs_dropout_prob=UpperCamelCase__ , max_position_embeddings=UpperCamelCase__ , type_vocab_size=UpperCamelCase__ , initializer_range=UpperCamelCase__ , layer_norm_eps=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : List[str] = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : str = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Union[str, Any] = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Union[str, Any] = text_embed
SCREAMING_SNAKE_CASE : List[str] = visual_embed
SCREAMING_SNAKE_CASE : Optional[Any] = input_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : List[Any] = classifier_dropout
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = version.parse("""1.12""")
@property
def __A ( self : str ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __A ( self : int ):
'''simple docstring'''
return 1E-5
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Optional[Any] , UpperCamelCase__ : "ProcessorMixin" , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , UpperCamelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : List[Any] = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : Any = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = dict(
processor(
UpperCamelCase__ , text=UpperCamelCase__ , boxes=UpperCamelCase__ , return_tensors=UpperCamelCase__ , ) )
return inputs
| 34
| 1
|
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def A ( _lowercase ):
if "cls_token" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''cls_token''' , '''vit.embeddings.cls_token''' )
if "mask_token" in name:
SCREAMING_SNAKE_CASE : int = name.replace('''mask_token''' , '''decoder.mask_token''' )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE : int = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE : Any = name.replace('''pos_embed''' , '''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE : str = name.replace('''patch_embed.proj''' , '''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''patch_embed.norm''' , '''vit.embeddings.norm''' )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE : str = name.replace('''decoder_blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('''blocks''' , '''vit.encoder.layer''' )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
SCREAMING_SNAKE_CASE : List[Any] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE : str = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE : str = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE : Dict = name.replace('''norm.weight''' , '''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('''norm.bias''' , '''vit.layernorm.bias''' )
return name
def A ( _lowercase , _lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = orig_state_dict.pop(_lowercase )
if "qkv" in key:
SCREAMING_SNAKE_CASE : int = key.split('''.''' )
SCREAMING_SNAKE_CASE : Dict = int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE : Union[str, Any] = config.decoder_hidden_size
SCREAMING_SNAKE_CASE : Tuple = '''decoder.decoder_layers.'''
if "weight" in key:
SCREAMING_SNAKE_CASE : str = val[:dim, :]
SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE : Tuple = val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE : List[str] = val[:dim]
SCREAMING_SNAKE_CASE : Optional[int] = val[dim : dim * 2]
SCREAMING_SNAKE_CASE : Union[str, Any] = val[-dim:]
else:
SCREAMING_SNAKE_CASE : int = config.hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = '''vit.encoder.layer.'''
if "weight" in key:
SCREAMING_SNAKE_CASE : Dict = val[:dim, :]
SCREAMING_SNAKE_CASE : List[str] = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE : List[Any] = val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE : List[str] = val[:dim]
SCREAMING_SNAKE_CASE : Union[str, Any] = val[dim : dim * 2]
SCREAMING_SNAKE_CASE : List[Any] = val[-dim:]
else:
SCREAMING_SNAKE_CASE : List[str] = val
return orig_state_dict
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : List[Any] = ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE : Union[str, Any] = 1_024
SCREAMING_SNAKE_CASE : Any = 4_096
SCREAMING_SNAKE_CASE : Optional[int] = 24
SCREAMING_SNAKE_CASE : Optional[int] = 16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE : Any = 14
SCREAMING_SNAKE_CASE : Union[str, Any] = 1_280
SCREAMING_SNAKE_CASE : List[str] = 5_120
SCREAMING_SNAKE_CASE : Dict = 32
SCREAMING_SNAKE_CASE : str = 16
SCREAMING_SNAKE_CASE : Optional[Any] = ViTMAEForPreTraining(_lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.hub.load_state_dict_from_url(_lowercase , map_location='''cpu''' )['''model''']
SCREAMING_SNAKE_CASE : Optional[int] = ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE : Optional[Any] = convert_state_dict(_lowercase , _lowercase )
model.load_state_dict(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
SCREAMING_SNAKE_CASE : str = image_processor(images=_lowercase , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE : Optional[Any] = model(**_lowercase )
SCREAMING_SNAKE_CASE : Tuple = outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE : int = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , _lowercase , atol=1e-4 )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__UpperCamelCase : Tuple = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
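# Hedged usage sketch (added): running the conversion from the command line with the
# default checkpoint URL from the parser above; the script filename and the output
# directory are assumptions, not taken from the source.
#
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base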
| 34
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = FunnelTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[Any] = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE : int = '''unwanted, running'''
return input_text, output_text
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
SCREAMING_SNAKE_CASE : int = tokenizer('''UNwant\u00E9d,running''' )
SCREAMING_SNAKE_CASE : Optional[Any] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
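    # Hedged supplementary sketch (added; the leading underscore keeps unittest from
    # collecting it automatically): with a pair input, Funnel assigns token type 2
    # to <cls>, 0 to sequence A and 1 to sequence B, as the test above asserts.
    def _demo_pair_token_types(self):
        tokenizer = self.tokenizer_class(self.vocab_file )
        encoded = tokenizer('''unwanted''' , '''running''' )
        self.assertEqual(encoded['''token_type_ids'''][0] , 2 )
        self.assertEqual(encoded['''token_type_ids'''][-1] , 1 )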
| 34
| 1
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = LEDTokenizer
UpperCamelCase_ = LEDTokenizerFast
UpperCamelCase_ = True
def __A ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : int = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE : Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE : List[str] = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase__ ) )
def __A ( self : Dict , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : List[Any] , **UpperCamelCase__ : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : int ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def __A ( self : List[Any] ):
'''simple docstring'''
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def __A ( self : Dict ):
'''simple docstring'''
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE : int = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : str = tokenizer(UpperCamelCase__ , max_length=len(UpperCamelCase__ ) , padding=UpperCamelCase__ , return_tensors='''pt''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@require_torch
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Dict = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors='''pt''' )
self.assertIn('''input_ids''' , UpperCamelCase__ )
self.assertIn('''attention_mask''' , UpperCamelCase__ )
self.assertNotIn('''labels''' , UpperCamelCase__ )
self.assertNotIn('''decoder_attention_mask''' , UpperCamelCase__ )
@require_torch
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def __A ( self : Optional[int] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors='''pt''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''A long paragraph for summarization.''']
SCREAMING_SNAKE_CASE : Any = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Dict = tokenizer(UpperCamelCase__ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : int = tokenizer(text_target=UpperCamelCase__ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Dict = inputs['''input_ids''']
SCREAMING_SNAKE_CASE : Optional[int] = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __A ( self : Optional[Any] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Optional[int] = ['''Summary of the text.''', '''Another summary.''']
SCREAMING_SNAKE_CASE : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
SCREAMING_SNAKE_CASE : List[str] = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = [[0] * len(UpperCamelCase__ ) for x in encoded_output['''input_ids''']]
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.pad(UpperCamelCase__ )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
pass
def __A ( self : Any ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = '''A, <mask> AllenNLP sentence.'''
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = tokenizer_p.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
UpperCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
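    # Hedged supplementary sketch (added; not collected by unittest): LED models take
    # a `global_attention_mask` alongside `input_ids`, and the padding test above
    # checks that `tokenizer.pad` carries it through. Assumes the mixin's
    # get_tokenizer helper; marking only the first token as global is a convention,
    # not taken from this file.
    def _demo_global_attention_mask(self):
        tokenizer = self.get_tokenizer()
        encoded = tokenizer(['''lower newer'''] )
        encoded['''global_attention_mask'''] = [[1] + [0] * (len(x ) - 1) for x in encoded['''input_ids''']]
        padded = tokenizer.pad(encoded )
        self.assertIn('''global_attention_mask''' , padded )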
| 34
|
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset
SCREAMING_SNAKE_CASE : Optional[Any] = process
SCREAMING_SNAKE_CASE : Union[str, Any] = params
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : List[str] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.dataset[i]
SCREAMING_SNAKE_CASE : Optional[int] = self.process(UpperCamelCase__ , **self.params )
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = loader
SCREAMING_SNAKE_CASE : List[Any] = infer
SCREAMING_SNAKE_CASE : int = params
if loader_batch_size == 1:
            # Let's spare some time by deactivating batch unrolling altogether
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = loader_batch_size
# Internal bookkeeping
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = None
def __len__( self : int ):
'''simple docstring'''
return len(self.loader )
def __iter__( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
SCREAMING_SNAKE_CASE : Optional[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Convert ModelOutput to tuple first
SCREAMING_SNAKE_CASE : Dict = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    # Those are stored as lists of tensors, so they need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
SCREAMING_SNAKE_CASE : int = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
SCREAMING_SNAKE_CASE : Tuple = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
SCREAMING_SNAKE_CASE : Any = self._loader_batch_data.__class__(UpperCamelCase__ )
self._loader_batch_index += 1
return result
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
SCREAMING_SNAKE_CASE : Tuple = next(self.iterator )
SCREAMING_SNAKE_CASE : List[Any] = self.infer(UpperCamelCase__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Optional[int] = processed
else:
SCREAMING_SNAKE_CASE : int = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : int = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[Any] = observed_batch_size
# Setting internal index to unwrap the batch
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : int = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
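# Hedged walk-through (added): with loader_batch_size=8 but only 3 items in the
# final dataloader batch, `observed_batch_size` shrinks to 3, so the unrolling
# above stops after the real elements instead of indexing past the batch.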
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = iter(self.loader )
SCREAMING_SNAKE_CASE : List[Any] = None
return self
def __A ( self : List[str] ):
'''simple docstring'''
if self.subiterator is None:
SCREAMING_SNAKE_CASE : Dict = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
SCREAMING_SNAKE_CASE : Any = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item:
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
SCREAMING_SNAKE_CASE : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
SCREAMING_SNAKE_CASE : Union[str, Any] = next(self.subiterator )
return processed
class lowercase__ ( UpperCamelCase_):
def __iter__( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Tuple = self.loader_batch_item()
SCREAMING_SNAKE_CASE : Any = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
while not is_last:
SCREAMING_SNAKE_CASE : Any = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Tuple = processed
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : List[str] = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[str] = observed_batch_size
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : str = 0
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Any = self.loader_batch_item()
SCREAMING_SNAKE_CASE : List[Any] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
else:
SCREAMING_SNAKE_CASE : int = processed
SCREAMING_SNAKE_CASE : List[str] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
return accumulator
class lowercase__ ( UpperCamelCase_):
    def __init__( self : Optional[Any] , dataset : Dataset , key : str ):
        '''simple docstring'''
        self.dataset = dataset
        self.key = key
    def __len__( self : Optional[int] ):
        '''simple docstring'''
        return len(self.dataset )
    def __getitem__( self : Dict , i : int ):
        '''simple docstring'''
        return self.dataset[i][self.key]
class lowercase__ ( UpperCamelCase_):
    def __init__( self : List[Any] , dataset : Dataset , key1 : str , key2 : str ):
        '''simple docstring'''
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2
    def __len__( self : List[str] ):
        '''simple docstring'''
        return len(self.dataset )
    def __getitem__( self : Union[str, Any] , i : int ):
        '''simple docstring'''
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 34
| 1
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__UpperCamelCase : int = logging.get_logger(__name__)
def A ( _lowercase , _lowercase , _lowercase , _lowercase=None , _lowercase=None ):
# Recurse if needed
if "." in tensor_name:
SCREAMING_SNAKE_CASE : Dict = tensor_name.split('''.''' )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE : int = getattr(_lowercase , _lowercase )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
SCREAMING_SNAKE_CASE : List[str] = new_module
SCREAMING_SNAKE_CASE : Dict = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = tensor_name in module._buffers
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(_lowercase , _lowercase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        is_8bit = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_4bit or is_8bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
if value is None:
SCREAMING_SNAKE_CASE : Any = old_value.to(_lowercase )
elif isinstance(_lowercase , torch.Tensor ):
SCREAMING_SNAKE_CASE : List[Any] = value.to('''cpu''' )
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
                        '''0.37.2''' )
                    if not is_8bit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
SCREAMING_SNAKE_CASE : Dict = torch.tensor(_lowercase , device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , Conv1D ) and fp16_statistics is None:
SCREAMING_SNAKE_CASE : Optional[int] = new_value.T
SCREAMING_SNAKE_CASE : Dict = old_value.__dict__
            if is_8bit:
                SCREAMING_SNAKE_CASE : List[Any] = bnb.nn.Int8Params(_lowercase , requires_grad=_lowercase , **_lowercase ).to(_lowercase )
            elif is_4bit:
                SCREAMING_SNAKE_CASE : str = bnb.nn.Params4bit(_lowercase , requires_grad=_lowercase , **_lowercase ).to(_lowercase )
SCREAMING_SNAKE_CASE : Tuple = new_value
            if fp16_statistics is not None:
                setattr(module.weight , '''SCB''' , fp16_statistics.to(_lowercase ) )
else:
if value is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = old_value.to(_lowercase )
elif isinstance(_lowercase , torch.Tensor ):
SCREAMING_SNAKE_CASE : List[Any] = value.to(_lowercase )
else:
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(_lowercase , device=_lowercase )
if is_buffer:
SCREAMING_SNAKE_CASE : List[Any] = new_value
else:
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(_lowercase , requires_grad=old_value.requires_grad )
SCREAMING_SNAKE_CASE : Dict = new_value
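# Hedged note (added): a dotted name such as "transformer.h.0.attn.bias" is resolved
# one attribute at a time above, so `module` ends up being the direct parent and
# `tensor_name` the final component before the parameter/buffer is swapped in place.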
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=False ):
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE : List[str] = []
current_key_name.append(_lowercase )
if (isinstance(_lowercase , nn.Linear ) or isinstance(_lowercase , _lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(_lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = module.weight.shape
else:
SCREAMING_SNAKE_CASE : Tuple = module.in_features
SCREAMING_SNAKE_CASE : Optional[int] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
                    SCREAMING_SNAKE_CASE : str = bnb.nn.Linear8bitLt(
                        _lowercase , _lowercase , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
):
pass
else:
                            SCREAMING_SNAKE_CASE : List[Any] = bnb.nn.Linear4bit(
                                _lowercase , _lowercase , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
SCREAMING_SNAKE_CASE : int = True
# Store the module class in case we need to transpose the weight later
SCREAMING_SNAKE_CASE : Union[str, Any] = type(_lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowercase )
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = _replace_with_bnb_linear(
_lowercase , _lowercase , _lowercase , _lowercase , has_been_replaced=_lowercase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = _replace_with_bnb_linear(
_lowercase , _lowercase , _lowercase , _lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def A ( *_lowercase , **_lowercase ):
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , _lowercase , )
return replace_with_bnb_linear(*_lowercase , **_lowercase )
def A ( *_lowercase , **_lowercase ):
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , _lowercase , )
return set_module_quantized_tensor_to_device(*_lowercase , **_lowercase )
def A ( _lowercase ):
    SCREAMING_SNAKE_CASE : Tuple = deepcopy(_lowercase )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
SCREAMING_SNAKE_CASE : str = find_tied_parameters(_lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE : Optional[int] = sum(_lowercase , [] )
SCREAMING_SNAKE_CASE : Optional[int] = len(_lowercase ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE : List[Any] = not hasattr(_lowercase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE : List[Any] = list(model.named_children() )
SCREAMING_SNAKE_CASE : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE : Union[str, Any] = set(_lowercase ) - set(_lowercase )
SCREAMING_SNAKE_CASE : List[Any] = list(set(_lowercase ) ) + list(_lowercase )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE : int = ['''.weight''', '''.bias''']
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE : int = name.replace(_lowercase , '''''' )
filtered_module_names.append(_lowercase )
return filtered_module_names
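# Hedged usage sketch (added): quantizing a toy module tree with the helper above
# (upstream name `replace_with_bnb_linear`, defined as `A` here); requires a CUDA
# build of bitsandbytes, and the toy model is an assumption.
#
#   from transformers import BitsAndBytesConfig
#   toy = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 2))
#   bnb_config = BitsAndBytesConfig(load_in_8bit=True)
#   toy = replace_with_bnb_linear(toy, quantization_config=bnb_config)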
| 34
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """deberta-v2"""
def __init__( self : Optional[Any] , UpperCamelCase__ : Any=12_8100 , UpperCamelCase__ : Optional[int]=1536 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : List[str]=24 , UpperCamelCase__ : Tuple=6144 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : List[Any]=1E-7 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=-1 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : str="gelu" , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = relative_attention
SCREAMING_SNAKE_CASE : Optional[Any] = max_relative_positions
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCamelCase__ ) == str:
SCREAMING_SNAKE_CASE : Optional[int] = [x.strip() for x in pos_att_type.lower().split('''|''' )]
SCREAMING_SNAKE_CASE : Any = pos_att_type
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''pooler_hidden_size''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pooler_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = pooler_hidden_act
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Dict , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
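# Hedged illustration (added): the ONNX `inputs` property above emits token_type_ids
# only when type_vocab_size > 0 (the default above is 0). With the upstream class
# names (import path assumed):
#
#   from transformers import DebertaV2Config
#   from transformers.models.deberta_v2 import DebertaV2OnnxConfig
#   onnx_config = DebertaV2OnnxConfig(DebertaV2Config())
#   print(list(onnx_config.inputs))   # ['input_ids', 'attention_mask']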
| 34
| 1
|
import random
def _partition(data, pivot):
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items, index):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
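if __name__ == "__main__":
    # Added worked example: the median of an odd-length list is quick_select at
    # index len(items) // 2, i.e. the middle position of the sorted list.
    sample = [2, 4, 5, 7, 899, 54, 32]
    assert quick_select(sample, len(sample) // 2) == 7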
| 34
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Any = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Any = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {int(_lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE : Optional[int] = BitConfig(
conv_layer=_lowercase , num_labels=1_000 , idalabel=_lowercase , labelaid=_lowercase , )
return config
def A ( _lowercase ):
if "stem.conv" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
SCREAMING_SNAKE_CASE : str = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''bit.encoder.''' + name
return name
def A ( ):
SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def A ( _lowercase , _lowercase , _lowercase=False ):
SCREAMING_SNAKE_CASE : List[Any] = get_config(_lowercase )
# load original model from timm
SCREAMING_SNAKE_CASE : Optional[Any] = create_model(_lowercase , pretrained=_lowercase )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE : Optional[int] = timm_model.state_dict()
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Dict = state_dict.pop(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = val.squeeze() if '''head''' in key else val
# load HuggingFace model
SCREAMING_SNAKE_CASE : str = BitForImageClassification(_lowercase )
model.eval()
model.load_state_dict(_lowercase )
# create image processor
SCREAMING_SNAKE_CASE : Optional[Any] = create_transform(**resolve_data_config({} , model=_lowercase ) )
SCREAMING_SNAKE_CASE : List[str] = transform.transforms
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE : Tuple = BitImageProcessor(
do_resize=_lowercase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowercase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = transform(_lowercase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : Optional[int] = processor(_lowercase , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_lowercase , _lowercase )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE : List[Any] = timm_model(_lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
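# Hedged usage sketch (added): converting the default checkpoint; the script
# filename is an assumption, the flags come from the parser above.
#
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 --push_to_hub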
| 34
| 1
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__UpperCamelCase : str = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""audio_values""", """audio_mask"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : List[str]=2048 , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : str=[16, 16] , UpperCamelCase__ : List[str]=128 , UpperCamelCase__ : Union[str, Any]=4_4100 , UpperCamelCase__ : Optional[int]=86 , UpperCamelCase__ : Dict=2048 , UpperCamelCase__ : Any=0.0 , **UpperCamelCase__ : List[Any] , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : str = feature_size // self.patch_size[1]
SCREAMING_SNAKE_CASE : Any = n_fft
SCREAMING_SNAKE_CASE : List[str] = sampling_rate // hop_length_to_sampling_rate
SCREAMING_SNAKE_CASE : List[str] = sampling_rate
SCREAMING_SNAKE_CASE : Dict = padding_value
SCREAMING_SNAKE_CASE : Optional[int] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCamelCase__ , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=UpperCamelCase__ , norm='''slaney''' , mel_scale='''slaney''' , ).T
def __A ( self : str , UpperCamelCase__ : np.array ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = spectrogram(
UpperCamelCase__ , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
SCREAMING_SNAKE_CASE : Any = log_spec[:, :-1]
SCREAMING_SNAKE_CASE : List[Any] = log_spec - 20.0
SCREAMING_SNAKE_CASE : int = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Optional[int] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[bool] = True , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'''This feature extractor is set to support sampling rate'''
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : Optional[Any] = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE : List[str] = is_batched_numpy or (
isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ):
SCREAMING_SNAKE_CASE : Any = np.asarray(UpperCamelCase__ , dtype=np.floataa )
elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Dict = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
SCREAMING_SNAKE_CASE : List[str] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
SCREAMING_SNAKE_CASE : Tuple = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
SCREAMING_SNAKE_CASE : int = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(UpperCamelCase__ ).astype(np.floataa )
# convert into correct format for padding
SCREAMING_SNAKE_CASE : Optional[int] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
SCREAMING_SNAKE_CASE : List[str] = np.ones([len(UpperCamelCase__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
SCREAMING_SNAKE_CASE : Any = padded_audio_features * self.padding_value
for i in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : Optional[Any] = audio_features[i]
SCREAMING_SNAKE_CASE : Optional[int] = feature
# return as BatchFeature
if return_attention_mask:
SCREAMING_SNAKE_CASE : Optional[Any] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
SCREAMING_SNAKE_CASE : Dict = {'''audio_values''': padded_audio_features}
SCREAMING_SNAKE_CASE : Optional[int] = BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
return encoded_inputs
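# Hedged usage sketch (added): extracting padded log-mel patches from two clips of
# different lengths (upstream class name TvltFeatureExtractor); the synthetic
# waveforms are assumptions.
#
#   feature_extractor = TvltFeatureExtractor()
#   clips = [np.random.rand(44_100).astype(np.float32), np.random.rand(22_050).astype(np.float32)]
#   batch = feature_extractor(clips, sampling_rate=44_100, return_tensors='''np''')
#   print(batch['''audio_values'''].shape, batch['''audio_mask'''].shape)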
| 34
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__UpperCamelCase : str = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase : int = logging.getLogger()
def A ( ):
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('''-f''' )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
return args.f
def A ( _lowercase , _lowercase="eval" ):
SCREAMING_SNAKE_CASE : Dict = os.path.join(_lowercase , f"""{split}_results.json""" )
if os.path.exists(_lowercase ):
with open(_lowercase , '''r''' ) as f:
return json.load(_lowercase )
raise ValueError(f"""can't find {path}""" )
__UpperCamelCase : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase__ ( UpperCamelCase_):
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Tuple = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_glue.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : str = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_clm_flax.main()
SCREAMING_SNAKE_CASE : Dict = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_summarization_flax.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Dict = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_mlm_flax.main()
SCREAMING_SNAKE_CASE : List[Any] = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_ta_mlm_flax.main()
SCREAMING_SNAKE_CASE : Optional[int] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Any = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_ner.main()
SCREAMING_SNAKE_CASE : List[str] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_qa.main()
SCREAMING_SNAKE_CASE : str = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 34
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """unispeech-sat"""
def __init__( self : str , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Union[str, Any]=768 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : str=3072 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : str=1E-5 , UpperCamelCase__ : int="group" , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Tuple=(512, 512, 512, 512, 512, 512, 512) , UpperCamelCase__ : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__ : Optional[int]=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase__ : str=False , UpperCamelCase__ : List[Any]=128 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : int=False , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Dict=0.05 , UpperCamelCase__ : Any=10 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : int=10 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : List[str]=320 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : Any=100 , UpperCamelCase__ : Union[str, Any]=256 , UpperCamelCase__ : str=256 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Dict="mean" , UpperCamelCase__ : Dict=False , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=256 , UpperCamelCase__ : Optional[int]=(512, 512, 512, 512, 1500) , UpperCamelCase__ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCamelCase__ : Tuple=(1, 2, 3, 1, 1) , UpperCamelCase__ : Any=512 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Tuple=1 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : List[str]=504 , **UpperCamelCase__ : List[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : Tuple = feat_extract_norm
SCREAMING_SNAKE_CASE : Any = feat_extract_activation
SCREAMING_SNAKE_CASE : Dict = list(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = list(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = list(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = conv_bias
SCREAMING_SNAKE_CASE : Dict = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE : int = len(self.conv_dim )
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout
SCREAMING_SNAKE_CASE : List[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Dict = activation_dropout
SCREAMING_SNAKE_CASE : Any = feat_proj_dropout
SCREAMING_SNAKE_CASE : Optional[Any] = final_dropout
SCREAMING_SNAKE_CASE : Optional[int] = layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : List[str] = num_clusters
SCREAMING_SNAKE_CASE : str = do_stable_layer_norm
SCREAMING_SNAKE_CASE : Optional[int] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE : str = apply_spec_augment
SCREAMING_SNAKE_CASE : List[str] = mask_time_prob
SCREAMING_SNAKE_CASE : Optional[int] = mask_time_length
SCREAMING_SNAKE_CASE : int = mask_time_min_masks
SCREAMING_SNAKE_CASE : Optional[int] = mask_feature_prob
SCREAMING_SNAKE_CASE : Optional[Any] = mask_feature_length
SCREAMING_SNAKE_CASE : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE : Dict = num_codevectors_per_group
SCREAMING_SNAKE_CASE : str = num_codevector_groups
SCREAMING_SNAKE_CASE : Optional[Any] = contrastive_logits_temperature
SCREAMING_SNAKE_CASE : int = feat_quantizer_dropout
SCREAMING_SNAKE_CASE : Any = num_negatives
SCREAMING_SNAKE_CASE : Optional[Any] = codevector_dim
SCREAMING_SNAKE_CASE : Union[str, Any] = proj_codevector_dim
SCREAMING_SNAKE_CASE : Optional[int] = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE : Optional[Any] = ctc_loss_reduction
SCREAMING_SNAKE_CASE : Tuple = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE : Tuple = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE : int = list(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = list(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = list(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = xvector_output_dim
@property
def __A ( self : str ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
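# Sketch (derived from the defaults above, not in the original file): the property
# returns the feature encoder's total downsampling factor, i.e. the product of the
# conv strides. With the default conv_stride (5, 2, 2, 2, 2, 2, 2) this is 320,
# meaning one output frame per 320 input samples.
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320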
| 34
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    # creates a `shape[0] x shape[1]` nested list of random floats scaled by `scale`
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
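# Usage sketch (assumes the fixed floats_list above): a reproducible 2 x 3 nested
# list of floats in [0, 2.0).
_example = floats_list((2, 3), scale=2.0, rng=random.Random(0))
assert len(_example) == 2 and len(_example[0]) == 3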
class lowercase__ ( unittest.TestCase):
def __init__( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=7 , UpperCamelCase__ : Any=400 , UpperCamelCase__ : List[str]=2000 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : str=30 , UpperCamelCase__ : Tuple=4_4100 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : str = min_seq_length
SCREAMING_SNAKE_CASE : Dict = max_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Optional[Any] = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[int] = feature_size
SCREAMING_SNAKE_CASE : Tuple = num_audio_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = chunk_length
SCREAMING_SNAKE_CASE : str = sampling_rate
def __A ( self : Optional[Any] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __A ( self : Tuple , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
def _flatten(UpperCamelCase__ : str ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = TvltFeatureExtractor
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = TvltFeatureExtractionTester(self )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''spectrogram_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''feature_size''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''num_audio_channels''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''hop_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''chunk_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''sampling_rate''' ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Optional[int] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : int = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Any = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(
UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=UpperCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Dict = np.asarray(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __A ( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Dict = ds.sort('''id''' ).select(range(UpperCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : int = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''pt''' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
SCREAMING_SNAKE_CASE : str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCamelCase__ , atol=1E-4 ) )
| 34
| 1
|
from __future__ import annotations
def is_palindrome(n):
    s = str(n)
    return s == s[::-1]
def solution(limit = 1_000_000):
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split('b')[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
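# Quick sanity check (my own, not from the source): for a limit of 10 the
# double-base palindromes are 1, 3, 5, 7 and 9, so solution(10) == 25.
assert solution(10) == 25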
| 34
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_):
UpperCamelCase_ = 1
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 1000 , UpperCamelCase__ : Optional[Union[np.ndarray, List[float]]] = None ):
'''simple docstring'''
self.set_timesteps(UpperCamelCase__ )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE : str = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
SCREAMING_SNAKE_CASE : Tuple = 4
# running values
SCREAMING_SNAKE_CASE : int = []
def __A ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = num_inference_steps
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
SCREAMING_SNAKE_CASE : Tuple = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
SCREAMING_SNAKE_CASE : int = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sin(steps * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE : Dict = (1.0 - self.betas**2) ** 0.5
SCREAMING_SNAKE_CASE : Optional[Any] = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
SCREAMING_SNAKE_CASE : List[str] = timesteps.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = []
def __A ( self : Tuple , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
SCREAMING_SNAKE_CASE : Optional[int] = (self.timesteps == timestep).nonzero().item()
SCREAMING_SNAKE_CASE : Union[str, Any] = timestep_index + 1
SCREAMING_SNAKE_CASE : int = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase__ )
if len(self.ets ) == 1:
SCREAMING_SNAKE_CASE : Dict = self.ets[-1]
elif len(self.ets ) == 2:
SCREAMING_SNAKE_CASE : Optional[int] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
SCREAMING_SNAKE_CASE : str = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
SCREAMING_SNAKE_CASE : Optional[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
SCREAMING_SNAKE_CASE : Optional[int] = self._get_prev_sample(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : torch.FloatTensor , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return sample
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.alphas[timestep_index]
SCREAMING_SNAKE_CASE : List[str] = self.betas[timestep_index]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.alphas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Tuple = self.betas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Dict = (sample - sigma * ets) / max(UpperCamelCase__ , 1E-8 )
SCREAMING_SNAKE_CASE : Optional[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
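# Sketch (not part of the scheduler): once four past outputs are available, step()
# above blends them with the 4th-order Adams-Bashforth coefficients (55, -59, 37, -9) / 24.
# The coefficients sum to 1, so a constant signal passes through unchanged.
def _adams_bashforth4(ets):
    # ets: list of past model outputs, most recent last (needs at least 4 entries)
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24

assert _adams_bashforth4([1.0, 1.0, 1.0, 1.0]) == 1.0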
| 34
| 1
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
__UpperCamelCase : Any = datasets.utils.logging.get_logger(__name__)
class lowercase__ ( folder_based_builder.FolderBasedBuilderConfig):
UpperCamelCase_ = None
UpperCamelCase_ = None
class lowercase__ ( folder_based_builder.FolderBasedBuilder):
UpperCamelCase_ = datasets.Audio()
UpperCamelCase_ = """audio"""
UpperCamelCase_ = AudioFolderConfig
UpperCamelCase_ = 42 # definition at the bottom of the script
UpperCamelCase_ = AudioClassification(audio_column="""audio""" , label_column="""label""")
AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
lowercase__.EXTENSIONS = AUDIO_EXTENSIONS  # attach the extension list to the AudioFolder builder defined above
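# Usage sketch (hypothetical helper, not part of the builder): collect the files in a
# folder whose suffix is one of the supported audio extensions listed above.
from pathlib import Path

def find_audio_files(root):
    return [p for p in Path(root).rglob("*") if p.suffix.lower() in AUDIO_EXTENSIONS]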
| 34
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = IFPipeline
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __A ( self : Tuple ):
'''simple docstring'''
return self._get_dummy_components()
def __A ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : int=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : List[str] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __A ( self : Any ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __A ( self : List[Any] ):
'''simple docstring'''
self._test_save_load_local()
def __A ( self : List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
prompt_embeds , negative_prompt_embeds = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE : Tuple = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def _start_torch_memory_measurement():
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
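# Usage sketch (my own helper, assumes a CUDA device): bracket a workload with the
# reset helper above, then read the peak allocation back afterwards.
def measure_peak_cuda_memory(fn, *args, **kwargs):
    _start_torch_memory_measurement()
    result = fn(*args, **kwargs)
    return result, torch.cuda.max_memory_allocated()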
| 34
| 1
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class lowercase__ :
UpperCamelCase_ = list_field(
default=[] , metadata={
"""help""": (
"""Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
""" of all available models"""
)
} , )
UpperCamelCase_ = list_field(
default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""})
UpperCamelCase_ = list_field(
default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Use FP16 to accelerate inference."""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Benchmark training of model"""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Verbose memory tracing"""})
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={
"""help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
} , )
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Trace memory line by line"""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Save result to a CSV file"""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Save all print statements in a log file"""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Whether to print environment information"""})
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
""" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
""" for debugging / testing and on TPU."""
)
} , )
UpperCamelCase_ = field(
default=f"inference_time_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
UpperCamelCase_ = field(
default=f"inference_memory_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
UpperCamelCase_ = field(
default=f"train_time_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
UpperCamelCase_ = field(
default=f"train_memory_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
UpperCamelCase_ = field(
default=f"env_info_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving environment information."""} , )
UpperCamelCase_ = field(
default=f"log_{round(time())}.csv" , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
UpperCamelCase_ = field(default=3 , metadata={"""help""": """Times an experiment will be run."""})
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
""" model weights."""
)
} , )
def __A ( self : Dict ):
'''simple docstring'''
warnings.warn(
f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , UpperCamelCase__ , )
def __A ( self : Any ):
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def __A ( self : List[str] ):
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def __A ( self : List[Any] ):
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
| 34
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__UpperCamelCase : int = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
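# Usage sketch (dummy image, my own example): with keep_aspect_ratio=True the scale
# factor closer to 1 is applied to both sides, and each side is snapped to a
# multiple of 32, mirroring DPT preprocessing.
_dummy_image = np.zeros((3, 480, 640), dtype=np.float32)  # channels-first
assert get_resize_output_image_size(_dummy_image, output_size=(384, 384), keep_aspect_ratio=True, multiple=32) == (384, 512)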
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""pixel_values"""]
def __init__( self : int , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'''height''': 384, '''width''': 384}
SCREAMING_SNAKE_CASE : Any = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : str = keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(
UpperCamelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase__ , multiple=UpperCamelCase__ , )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Dict = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Dict = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Any = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Tuple] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[int] = []
for idx in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
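# Usage sketch (illustrative names, not executed here): post-processing resizes each
# logit map to its target size with bilinear interpolation and takes the per-pixel
# argmax over classes, yielding one HxW label map per image.
#
#   processor = DPTImageProcessor()
#   seg_maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])
#   seg_maps[0].shape  # -> torch.Size([480, 640])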
| 34
| 1
|
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class lowercase__ :
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = 42
@dataclass
class lowercase__ :
UpperCamelCase_ = 42
UpperCamelCase_ = field(default="""toto""" , metadata={"""help""": """help message"""})
@dataclass
class lowercase__ :
UpperCamelCase_ = False
UpperCamelCase_ = True
UpperCamelCase_ = None
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """titi"""
UpperCamelCase_ = """toto"""
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """titi"""
UpperCamelCase_ = """toto"""
UpperCamelCase_ = 42
@dataclass
class lowercase__ :
UpperCamelCase_ = "toto"
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = BasicEnum(self.foo )
@dataclass
class lowercase__ :
UpperCamelCase_ = "toto"
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = MixedTypeEnum(self.foo )
@dataclass
class lowercase__ :
UpperCamelCase_ = None
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """help message"""})
UpperCamelCase_ = None
UpperCamelCase_ = list_field(default=[])
UpperCamelCase_ = list_field(default=[])
@dataclass
class lowercase__ :
UpperCamelCase_ = list_field(default=[])
UpperCamelCase_ = list_field(default=[1, 2, 3])
UpperCamelCase_ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
UpperCamelCase_ = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class lowercase__ :
UpperCamelCase_ = field()
UpperCamelCase_ = field()
UpperCamelCase_ = field()
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = BasicEnum(self.required_enum )
@dataclass
class lowercase__ :
UpperCamelCase_ = 42
UpperCamelCase_ = field()
UpperCamelCase_ = None
UpperCamelCase_ = field(default="""toto""" , metadata={"""help""": """help message"""})
UpperCamelCase_ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
if is_python_no_less_than_3_10:
@dataclass
class lowercase__ :
UpperCamelCase_ = False
UpperCamelCase_ = True
UpperCamelCase_ = None
@dataclass
class lowercase__ :
UpperCamelCase_ = None
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """help message"""})
UpperCamelCase_ = None
UpperCamelCase_ = list_field(default=[])
UpperCamelCase_ = list_field(default=[])
class lowercase__ ( unittest.TestCase):
def __A ( self : Union[str, Any] , UpperCamelCase__ : argparse.ArgumentParser , UpperCamelCase__ : argparse.ArgumentParser ):
'''simple docstring'''
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
SCREAMING_SNAKE_CASE : Tuple = {k: v for k, v in vars(UpperCamelCase__ ).items() if k != '''container'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = {k: v for k, v in vars(UpperCamelCase__ ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , UpperCamelCase__ ) and yy.get('''choices''' , UpperCamelCase__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](UpperCamelCase__ ) , yy['''type'''](UpperCamelCase__ ) )
del xx["type"], yy["type"]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=UpperCamelCase__ , required=UpperCamelCase__ )
expected.add_argument('''--bar''' , type=UpperCamelCase__ , required=UpperCamelCase__ )
expected.add_argument('''--baz''' , type=UpperCamelCase__ , required=UpperCamelCase__ )
expected.add_argument('''--flag''' , type=UpperCamelCase__ , default=UpperCamelCase__ , const=UpperCamelCase__ , nargs='''?''' )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
(example,) = parser.parse_args_into_dataclasses(UpperCamelCase__ , look_for_args_file=UpperCamelCase__ )
self.assertFalse(example.flag )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=UpperCamelCase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=UpperCamelCase__ , help='''help message''' )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=UpperCamelCase__ , default=UpperCamelCase__ , const=UpperCamelCase__ , nargs='''?''' )
expected.add_argument('''--baz''' , type=UpperCamelCase__ , default=UpperCamelCase__ , const=UpperCamelCase__ , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=UpperCamelCase__ , dest='''baz''' )
expected.add_argument('''--opt''' , type=UpperCamelCase__ , default=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCamelCase__ )
for dataclass_type in dataclass_types:
SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser(UpperCamelCase__ )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args([] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : int = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__ ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
SCREAMING_SNAKE_CASE : int = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
SCREAMING_SNAKE_CASE : int = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
SCREAMING_SNAKE_CASE : Dict = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __A ( self : Dict ):
'''simple docstring'''
@dataclass
class lowercase__ :
UpperCamelCase_ = "toto"
SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
SCREAMING_SNAKE_CASE : str = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=UpperCamelCase__ )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=UpperCamelCase__ )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=UpperCamelCase__ )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=UpperCamelCase__ )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args([] )
self.assertEqual(
UpperCamelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(UpperCamelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=UpperCamelCase__ , type=UpperCamelCase__ )
expected.add_argument('''--bar''' , default=UpperCamelCase__ , type=UpperCamelCase__ , help='''help message''' )
expected.add_argument('''--baz''' , default=UpperCamelCase__ , type=UpperCamelCase__ )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=UpperCamelCase__ )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCamelCase__ )
for dataclass_type in dataclass_types:
SCREAMING_SNAKE_CASE : str = HfArgumentParser(UpperCamelCase__ )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args([] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , bar=UpperCamelCase__ , baz=UpperCamelCase__ , ces=[] , des=[] ) )
SCREAMING_SNAKE_CASE : Any = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(UpperCamelCase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=UpperCamelCase__ , required=UpperCamelCase__ )
expected.add_argument('''--required_str''' , type=UpperCamelCase__ , required=UpperCamelCase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=UpperCamelCase__ , )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=UpperCamelCase__ , required=UpperCamelCase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=UpperCamelCase__ , )
expected.add_argument('''--opt''' , type=UpperCamelCase__ , default=UpperCamelCase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=UpperCamelCase__ , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=UpperCamelCase__ )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
SCREAMING_SNAKE_CASE : List[Any] = parser.parse_dict(UpperCamelCase__ )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = BasicExample(**UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(UpperCamelCase__ , parser.parse_dict , UpperCamelCase__ , allow_extra_keys=UpperCamelCase__ )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : Any = os.path.join(UpperCamelCase__ , '''temp_json''' )
os.mkdir(UpperCamelCase__ )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
SCREAMING_SNAKE_CASE : str = BasicExample(**UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = HfArgumentParser(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : int = os.path.join(UpperCamelCase__ , '''temp_yaml''' )
os.mkdir(UpperCamelCase__ )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
SCREAMING_SNAKE_CASE : List[Any] = BasicExample(**UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
| 34
|
import random
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = [], [], []
for element in data:
if element < pivot:
less.append(_lowercase )
elif element > pivot:
greater.append(_lowercase )
else:
equal.append(_lowercase )
return less, equal, greater
def A ( _lowercase , _lowercase ):
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(_lowercase ) or index < 0:
return None
SCREAMING_SNAKE_CASE : Dict = items[random.randint(0 , len(_lowercase ) - 1 )]
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = _partition(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = len(_lowercase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_lowercase , _lowercase )
# must be in larger
else:
return quick_select(_lowercase , index - (m + count) )
| 34
| 1
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = (UniPCMultistepScheduler,)
UpperCamelCase_ = (("""num_inference_steps""", 25),)
def __A ( self : Any , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**UpperCamelCase__ )
return config
def __A ( self : Union[str, Any] , UpperCamelCase__ : Any=0 , **UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop('''num_inference_steps''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = self.dummy_sample
SCREAMING_SNAKE_CASE : Any = 0.1 * sample
SCREAMING_SNAKE_CASE : str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : str = self.get_scheduler_config(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = sample, sample
for t in range(UpperCamelCase__ , time_step + scheduler.config.solver_order + 1 ):
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
SCREAMING_SNAKE_CASE : Any = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __A ( self : Optional[Any] , UpperCamelCase__ : List[str]=0 , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE : Tuple = kwargs.pop('''num_inference_steps''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_sample
SCREAMING_SNAKE_CASE : str = 0.1 * sample
SCREAMING_SNAKE_CASE : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class.from_pretrained(UpperCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
SCREAMING_SNAKE_CASE : str = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
if scheduler is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = scheduler_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = 10
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('''num_inference_steps''' , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE : int = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , '''set_timesteps''' ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , '''set_timesteps''' ):
SCREAMING_SNAKE_CASE : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE : Any = [residual + 0.2, residual + 0.15, residual + 0.10]
SCREAMING_SNAKE_CASE : int = dummy_past_residuals[: scheduler.config.solver_order]
SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[5]
SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[6]
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_b.shape )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE : Optional[Any] = self.full_loop(scheduler=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
SCREAMING_SNAKE_CASE : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : Any = DEISMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : Any = UniPCMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : str = self.full_loop(scheduler=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def __A ( self : List[str] ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
self.check_over_configs(thresholding=UpperCamelCase__ )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCamelCase__ , prediction_type=UpperCamelCase__ , sample_max_value=UpperCamelCase__ , solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , )
def __A ( self : Optional[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def __A ( self : Dict ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , prediction_type=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = self.full_loop(
solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , prediction_type=UpperCamelCase__ , )
assert not torch.isnan(UpperCamelCase__ ).any(), "Samples have nan numbers"
def __A ( self : List[Any] ):
'''simple docstring'''
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def __A ( self : Optional[Any] ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=0 )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.full_loop()
SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.full_loop(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : List[str] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.1014 ) < 1E-3
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config(thresholding=UpperCamelCase__ , dynamic_thresholding_ratio=0 )
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = 10
SCREAMING_SNAKE_CASE : List[str] = self.dummy_model()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : List[str] = model(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
assert sample.dtype == torch.floataa
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 34
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
# TODO Update this
__UpperCamelCase : List[str] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """esm"""
def __init__( self : Tuple , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Any=768 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : str=12 , UpperCamelCase__ : Optional[int]=3072 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Union[str, Any]=1026 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = position_embedding_type
SCREAMING_SNAKE_CASE : Any = use_cache
SCREAMING_SNAKE_CASE : Dict = emb_layer_norm_before
SCREAMING_SNAKE_CASE : List[str] = token_dropout
SCREAMING_SNAKE_CASE : List[Any] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
SCREAMING_SNAKE_CASE : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = EsmFoldConfig(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
SCREAMING_SNAKE_CASE : Optional[int] = get_default_vocab_list()
else:
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_list
else:
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.esmfold_config.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = None
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = 0
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Optional[int] ):
'''simple docstring'''
if self.trunk is None:
SCREAMING_SNAKE_CASE : Optional[Any] = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = TrunkConfig(**self.trunk )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = asdict(self )
SCREAMING_SNAKE_CASE : Tuple = self.trunk.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 48
UpperCamelCase_ = 1_024
UpperCamelCase_ = 128
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = False
UpperCamelCase_ = 4
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Any ):
'''simple docstring'''
if self.structure_module is None:
SCREAMING_SNAKE_CASE : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
SCREAMING_SNAKE_CASE : Dict = self.sequence_state_dim // self.sequence_head_width
SCREAMING_SNAKE_CASE : Tuple = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'''
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'''
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = asdict(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.structure_module.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 384
UpperCamelCase_ = 128
UpperCamelCase_ = 16
UpperCamelCase_ = 128
UpperCamelCase_ = 12
UpperCamelCase_ = 4
UpperCamelCase_ = 8
UpperCamelCase_ = 0.1
UpperCamelCase_ = 8
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 7
UpperCamelCase_ = 10
UpperCamelCase_ = 1E-8
UpperCamelCase_ = 1E5
def __A ( self : Dict ):
'''simple docstring'''
return asdict(self )
def A ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 34
| 1
|
import os
import sys
import unittest
__UpperCamelCase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCamelCase : str = os.path.join(git_repo_path, 'src', 'transformers')
__UpperCamelCase : Optional[int] = '\n{0} = None\n'
__UpperCamelCase : Any = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
__UpperCamelCase : Dict = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class lowercase__ ( unittest.TestCase):
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(UpperCamelCase__ , '''tokenizers''' )
SCREAMING_SNAKE_CASE : Optional[Any] = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(UpperCamelCase__ , '''tensorflow_text''' )
SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(UpperCamelCase__ , '''sentencepiece_and_tokenizers''' )
SCREAMING_SNAKE_CASE : List[Any] = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(UpperCamelCase__ , '''sentencepiece_and_tensorflow_text''' )
SCREAMING_SNAKE_CASE : int = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(UpperCamelCase__ , '''sentencepiece_and_tokenizers_and_vision''' )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , UpperCamelCase__ )
self.assertIn('''tensorflow_text''' , UpperCamelCase__ )
self.assertIn('''sentencepiece_and_tokenizers''' , UpperCamelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(UpperCamelCase__ , '''\nCONSTANT = None\n''' )
SCREAMING_SNAKE_CASE : Tuple = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
UpperCamelCase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
SCREAMING_SNAKE_CASE : str = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
SCREAMING_SNAKE_CASE : str = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
SCREAMING_SNAKE_CASE : str = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , UpperCamelCase__ )
| 34
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=UpperCamelCase__ , )
assert hasattr(self , '''env''' )
def __A ( self : str , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
SCREAMING_SNAKE_CASE : Any = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCamelCase__ , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version='''py36''' , )
def __A ( self : Optional[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __A ( self : Tuple , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.create_estimator(UpperCamelCase__ )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE : List[str] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCamelCase__ )
| 34
| 1
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__ :
def __init__( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : List[Any]=13 , UpperCamelCase__ : Any=30 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : List[Any]=5 , UpperCamelCase__ : str=4 , UpperCamelCase__ : Any=37 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Optional[int]=10 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Dict=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE : Tuple = image_size
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : Dict = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Any = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE : Dict = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE : Optional[int] = num_patches + 1
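        # e.g. with the defaults above (image_size=30, patch_size=2):
        # (30 // 2) ** 2 = 225 patches, hence seq_length = 226 with the [CLS] token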
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_config()
return config, pixel_values, labels
def __A ( self : Optional[Any] ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __A ( self : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ViTMSNModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = ViTMSNForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : str = model(UpperCamelCase__ , labels=UpperCamelCase__ )
        print(f"""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
        print(f"""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : List[Any] = 1
SCREAMING_SNAKE_CASE : int = ViTMSNForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCamelCase_ = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = ViTMSNModelTester(self )
SCREAMING_SNAKE_CASE : Optional[int] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMSN does not use inputs_embeds''' )
def __A ( self : str ):
'''simple docstring'''
pass
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Any = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = model_class(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : List[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def __A ( self : Tuple ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = ViTMSNModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def A ( ):
SCREAMING_SNAKE_CASE : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase):
@cached_property
def __A ( self : Tuple ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None
@slow
def __A ( self : Dict ):
'''simple docstring'''
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE : Optional[int] = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = self.default_image_processor
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**UpperCamelCase__ )
# verify the logits
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 34
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__UpperCamelCase : Dict = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__UpperCamelCase : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if ' ' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__UpperCamelCase : Optional[Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 34
| 1
|
import random
class lowercase__ :
@staticmethod
def __A ( UpperCamelCase__ : str ):
'''simple docstring'''
        SCREAMING_SNAKE_CASE : str = [ord(i ) for i in text]
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : List[str] = []
for i in plain:
SCREAMING_SNAKE_CASE : Tuple = random.randint(1 , 300 )
SCREAMING_SNAKE_CASE : Optional[Any] = (i + k) * k
cipher.append(UpperCamelCase__ )
key.append(UpperCamelCase__ )
return cipher, key
@staticmethod
def __A ( UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : Dict = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(UpperCamelCase__ ) )
return "".join(UpperCamelCase__ )
if __name__ == "__main__":
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
| 34
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Dict = None
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Union[str, Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = TaTokenizer
UpperCamelCase_ = []
def __init__( self : str , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]=100 , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : str , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE : List[str] = [f"""<extra_id_{i}>""" for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
SCREAMING_SNAKE_CASE : int = len(set(filter(lambda UpperCamelCase__ : bool('''extra_id_''' in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = vocab_file
SCREAMING_SNAKE_CASE : int = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE : str = extra_ids
@staticmethod
def __A ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
SCREAMING_SNAKE_CASE : List[str] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase__ , )
return max_model_length
def __A ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Any = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def __A ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[int] = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            SCREAMING_SNAKE_CASE : Tuple = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
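        # Hedged illustration of the pattern built above (T5 uses no prefix
        # tokens by default, so self.prefix_tokens is []):
        #   single sequence: token_ids_a + [eos]
        #   sequence pair:   token_ids_a + [eos] + token_ids_b + [eos]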
def __A ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
def __A ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(r'''<extra_id_\d+>''' , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self : List[Any] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
| 34
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCamelCase : Dict = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = True
while ask_again:
SCREAMING_SNAKE_CASE : Optional[Any] = input(_lowercase )
try:
if default is not None and len(_lowercase ) == 0:
return default
return convert_value(_lowercase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_lowercase )
def A ( _lowercase , _lowercase=[] , _lowercase=None , _lowercase=0 ):
SCREAMING_SNAKE_CASE : Dict = BulletMenu(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : str = menu.run(default_choice=_lowercase )
return convert_value(_lowercase ) if convert_value is not None else result
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = int(_lowercase )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def A ( _lowercase ):
return {"yes": True, "no": False}[value.lower()]
class lowercase__ ( argparse.RawDescriptionHelpFormatter):
def __A ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = super()._format_usage(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
| 34
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : str = False
class lowercase__ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = VersatileDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe.dual_guided(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe.text_to_image(
prompt=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.image_variation(UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 34
| 1
|
import numpy as np
def A ( _lowercase ):
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
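    # A quick numeric check of the logistic formula above (hedged: written
    # inline so it does not depend on the placeholder names in this dump):
    print(1 / (1 + np.exp(-np.array([-1.0, 0.0, 1.0]))))  # ~[0.2689 0.5 0.7311]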
| 34
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def A ( _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = analyze_text(_lowercase )
SCREAMING_SNAKE_CASE : Any = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
SCREAMING_SNAKE_CASE : Tuple = sum(single_char_strings.values() )
# one length string
SCREAMING_SNAKE_CASE : Tuple = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
SCREAMING_SNAKE_CASE : Tuple = single_char_strings[ch]
SCREAMING_SNAKE_CASE : List[str] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
SCREAMING_SNAKE_CASE : Optional[Any] = sum(two_char_strings.values() )
SCREAMING_SNAKE_CASE : List[str] = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            SCREAMING_SNAKE_CASE : Union[str, Any] = cha + chb
if sequence in two_char_strings:
SCREAMING_SNAKE_CASE : Any = two_char_strings[sequence]
SCREAMING_SNAKE_CASE : Dict = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = Counter() # type: ignore
SCREAMING_SNAKE_CASE : Any = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
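# A self-contained sketch of the counting scheme implemented above (hedged: it
# mirrors analyze_text rather than calling it, since names in this dump are
# placeholders). Each character is counted once, and the bigram counter gets a
# leading-space pair for the first character:
if __name__ == "__main__":
    print(Counter("abc"))  # Counter({'a': 1, 'b': 1, 'c': 1})
    print(Counter([" " + "abc"[0]] + ["abc"[i : i + 2] for i in range(len("abc") - 1)]))
    # -> Counter({' a': 1, 'ab': 1, 'bc': 1})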
def A ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 34
| 1
|
import math
from collections.abc import Iterator
from itertools import takewhile
def A ( _lowercase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_lowercase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
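# The 6k +/- 1 trial division above relies on every prime p > 3 satisfying
# p % 6 in (1, 5); a quick self-contained sanity check:
assert all(p % 6 in (1, 5) for p in (5, 7, 11, 13, 17, 19, 23))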
def A ( ):
SCREAMING_SNAKE_CASE : Tuple = 2
while True:
if is_prime(_lowercase ):
yield num
num += 1
def A ( _lowercase = 2_000_000 ):
    return sum(takewhile(lambda x : x < _lowercase , prime_generator() ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 34
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Tuple = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34
| 1
|
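# [Added sketch of the deferred-import idea behind the _LazyModule pattern above; a
# simplified stand-in, not the transformers implementation]
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that actually defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, name: str):
        # called only when normal lookup fails, i.e. on first access to a symbol
        if name not in self._symbol_to_module:
            raise AttributeError(name)
        value = getattr(importlib.import_module(self._symbol_to_module[name]), name)
        setattr(self, name, value)  # cache so the import runs only once
        return value

lazy = LazyModule("demo", {"math": ["sqrt"]})
assert lazy.sqrt(9.0) == 3.0  # "math" is imported only at this point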
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
__UpperCamelCase : Optional[Any] = False
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[int] , UpperCamelCase__ : List[Any]=32 ):
'''simple docstring'''
set_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDModel(sample_size=UpperCamelCase__ , in_channels=3 , out_channels=3 )
SCREAMING_SNAKE_CASE : Tuple = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
SCREAMING_SNAKE_CASE : int = DDPMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase__ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(UpperCamelCase__ ) for _ in range(4 )]
SCREAMING_SNAKE_CASE : int = [torch.randn((4, 3, 32, 32) ).to(UpperCamelCase__ ) for _ in range(4 )]
SCREAMING_SNAKE_CASE : Optional[Any] = [torch.randint(0 , 1000 , (4,) ).long().to(UpperCamelCase__ ) for _ in range(4 )]
# train with a DDPM scheduler
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_model_optimizer(resolution=32 )
model.train().to(UpperCamelCase__ )
for i in range(4 ):
optimizer.zero_grad()
SCREAMING_SNAKE_CASE : Union[str, Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
SCREAMING_SNAKE_CASE : Tuple = model(UpperCamelCase__ , timesteps[i] ).sample
SCREAMING_SNAKE_CASE : str = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_model_optimizer(resolution=32 )
model.train().to(UpperCamelCase__ )
for i in range(4 ):
optimizer.zero_grad()
SCREAMING_SNAKE_CASE : int = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
SCREAMING_SNAKE_CASE : Any = model(UpperCamelCase__ , timesteps[i] ).sample
SCREAMING_SNAKE_CASE : Dict = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
| 34
|
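# [Added sketch of the forward-noising step the equivalence test above relies on; a
# simplified stand-in, not the diffusers schedulers] add_noise computes
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
# where alpha_bar_t is the cumulative product of (1 - beta_t), which is why DDPM and
# DDIM produce identical training targets for the same beta schedule:
import torch

def add_noise(x0: torch.Tensor, eps: torch.Tensor, t: torch.Tensor,
              betas: torch.Tensor) -> torch.Tensor:
    alpha_bar = torch.cumprod(1.0 - betas, dim=0)[t]        # shape: (batch,)
    alpha_bar = alpha_bar.view(-1, *([1] * (x0.ndim - 1)))  # broadcast over C, H, W
    return alpha_bar.sqrt() * x0 + (1.0 - alpha_bar).sqrt() * eps

betas = torch.linspace(1e-4, 0.02, 1000)
x0, eps = torch.randn(4, 3, 32, 32), torch.randn(4, 3, 32, 32)
noisy = add_noise(x0, eps, torch.randint(0, 1000, (4,)), betas)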
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : Tuple = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['MaskFormerFeatureExtractor']
__UpperCamelCase : List[Any] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__UpperCamelCase : Union[str, Any] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 34
| 1
|
import doctest
from collections import deque
import numpy as np
class lowercase__ :
def __init__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [2, 1, 2, -1]
SCREAMING_SNAKE_CASE : Union[str, Any] = [1, 2, 3, 4]
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = len(self.first_signal )
SCREAMING_SNAKE_CASE : Optional[Any] = len(self.second_signal )
SCREAMING_SNAKE_CASE : List[Any] = max(UpperCamelCase__ , UpperCamelCase__ )
# create a zero matrix of max_length x max_length
SCREAMING_SNAKE_CASE : Dict = [[0] * max_length for i in range(UpperCamelCase__ )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[int] = deque(self.second_signal )
rotated_signal.rotate(UpperCamelCase__ )
for j, item in enumerate(UpperCamelCase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
SCREAMING_SNAKE_CASE : Optional[Any] = np.matmul(np.transpose(UpperCamelCase__ ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(UpperCamelCase__ , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 34
|
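# [Added check, assuming the default signals above] The circular convolution of
# [2, 1, 2, -1] with [1, 2, 3, 4] is [10, 10, 6, 14]; the same result follows from
# the DFT identity IDFT(DFT(x) * DFT(y)):
import numpy as np

x = np.array([2, 1, 2, -1], dtype=float)
y = np.array([1, 2, 3, 4], dtype=float)
result = np.real(np.fft.ifft(np.fft.fft(x) * np.fft.fft(y)))
assert np.allclose(result, [10.0, 10.0, 6.0, 14.0])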
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCamelCase : Dict = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = True
while ask_again:
SCREAMING_SNAKE_CASE : Optional[Any] = input(_lowercase )
try:
if default is not None and len(_lowercase ) == 0:
return default
return convert_value(_lowercase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_lowercase )
def A ( _lowercase , _lowercase=[] , _lowercase=None , _lowercase=0 ):
SCREAMING_SNAKE_CASE : Dict = BulletMenu(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : str = menu.run(default_choice=_lowercase )
return convert_value(_lowercase ) if convert_value is not None else result
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = int(_lowercase )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def A ( _lowercase ):
return {"yes": True, "no": False}[value.lower()]
class lowercase__ ( argparse.RawDescriptionHelpFormatter):
def __A ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = super()._format_usage(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
| 34
| 1
|
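# [Added usage sketch; ask_int below is a hypothetical analogue of the prompting
# helper above] The helpers re-prompt until the converter succeeds, falling back to
# a default on empty input:
from typing import Optional

def ask_int(prompt: str, default: Optional[int] = None) -> int:
    while True:
        raw = input(prompt)
        if default is not None and len(raw) == 0:
            return default
        try:
            return int(raw)
        except ValueError:
            print("Please enter an integer.")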
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Any = {'vocab_file': 'vocab.txt'}
__UpperCamelCase : str = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
__UpperCamelCase : Optional[int] = {
'openbmb/cpm-ant-10b': 1024,
}
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Optional[int] = collections.OrderedDict()
with open(_lowercase , '''r''' , encoding='''utf-8''' ) as reader:
SCREAMING_SNAKE_CASE : List[str] = reader.readlines()
for index, token in enumerate(_lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = token.rstrip('''\n''' )
SCREAMING_SNAKE_CASE : Optional[int] = index
return vocab
class lowercase__ ( UpperCamelCase_):
def __init__( self : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : List[Any]=200 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = vocab
SCREAMING_SNAKE_CASE : str = unk_token
SCREAMING_SNAKE_CASE : Any = max_input_chars_per_word
def __A ( self : Any , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = list(UpperCamelCase__ )
if len(UpperCamelCase__ ) > self.max_input_chars_per_word:
return [self.unk_token]
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Optional[Any] = []
while start < len(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = None
while start < end:
SCREAMING_SNAKE_CASE : int = ''''''.join(chars[start:end] )
if substr in self.vocab:
SCREAMING_SNAKE_CASE : List[Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = end
return sub_tokens
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = False
def __init__( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any]="<d>" , UpperCamelCase__ : Union[str, Any]="</d>" , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Optional[Any]="<pad>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : int="</n>" , UpperCamelCase__ : List[str]="</_>" , UpperCamelCase__ : Tuple="left" , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=UpperCamelCase__ , eod_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , line_token=UpperCamelCase__ , space_token=UpperCamelCase__ , padding_side=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : List[str] = bod_token
SCREAMING_SNAKE_CASE : Optional[int] = eod_token
SCREAMING_SNAKE_CASE : Dict = load_vocab(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = self.encoder[space_token]
SCREAMING_SNAKE_CASE : Any = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
SCREAMING_SNAKE_CASE : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
SCREAMING_SNAKE_CASE : str = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE : Optional[int] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def __A ( self : Tuple ):
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
return self.encoder["\n"]
@property
def __A ( self : Optional[Any] ):
'''simple docstring'''
return len(self.encoder )
def __A ( self : int ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __A ( self : Optional[int] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = []
for x in jieba.cut(UpperCamelCase__ , cut_all=UpperCamelCase__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(UpperCamelCase__ ) )
return output_tokens
def __A ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [i for i in token_ids if i >= 0]
SCREAMING_SNAKE_CASE : int = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : Any ):
'''simple docstring'''
return token in self.encoder
def __A ( self : int , UpperCamelCase__ : List[str] ):
'''simple docstring'''
return "".join(UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def __A ( self : str , UpperCamelCase__ : str ):
'''simple docstring'''
return self.decoder.get(UpperCamelCase__ , self.unk_token )
def __A ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if os.path.isdir(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[str] = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
SCREAMING_SNAKE_CASE : int = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
SCREAMING_SNAKE_CASE : List[Any] = 0
if " " in self.encoder:
SCREAMING_SNAKE_CASE : Optional[Any] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
SCREAMING_SNAKE_CASE : str = self.encoder['''\n''']
del self.encoder["\n"]
SCREAMING_SNAKE_CASE : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
SCREAMING_SNAKE_CASE : Optional[Any] = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : List[int] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __A ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ ))
return [1] + ([0] * len(UpperCamelCase__ ))
| 34
|
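# [Added sketch of the greedy longest-match-first loop the WordpieceTokenizer above
# implements; the vocab below is hypothetical] Note that, unlike BERT WordPiece,
# this variant uses no "##" continuation prefix:
def wordpiece(word: str, vocab: set, unk: str = "<unk>") -> list:
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start and word[start:end] not in vocab:
            end -= 1              # shrink the window until a vocab entry matches
        if end == start:          # nothing matched: emit <unk>, advance one char
            tokens.append(unk)
            start += 1
        else:
            tokens.append(word[start:end])
            start = end
    return tokens

assert wordpiece("unhappy", {"un", "happy", "hap", "py"}) == ["un", "happy"]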
from __future__ import annotations
from typing import Any
class lowercase__ ( UpperCamelCase_):
pass
class lowercase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = data
SCREAMING_SNAKE_CASE : Node | None = None
def __iter__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self
SCREAMING_SNAKE_CASE : Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase__ )
yield node.data
SCREAMING_SNAKE_CASE : Dict = node.next_node
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
__UpperCamelCase : List[Any] = Node(1)
__UpperCamelCase : str = Node(2)
__UpperCamelCase : Dict = Node(3)
__UpperCamelCase : List[Any] = Node(4)
print(root_node.has_loop) # False
__UpperCamelCase : int = root_node.next_node
print(root_node.has_loop) # True
__UpperCamelCase : Union[str, Any] = Node(5)
__UpperCamelCase : Union[str, Any] = Node(6)
__UpperCamelCase : List[Any] = Node(5)
__UpperCamelCase : List[str] = Node(6)
print(root_node.has_loop) # False
__UpperCamelCase : List[Any] = Node(1)
print(root_node.has_loop) # False
| 34
| 1
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def A ( ):
raise RuntimeError('''CUDA out of memory.''' )
class lowercase__ ( nn.Module):
def __init__( self : Union[str, Any] ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(3 , 4 )
SCREAMING_SNAKE_CASE : List[Any] = nn.BatchNormad(4 )
SCREAMING_SNAKE_CASE : int = nn.Linear(4 , 5 )
def __A ( self : str , UpperCamelCase__ : List[str] ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(UpperCamelCase__ ) ) )
class lowercase__ ( unittest.TestCase):
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCamelCase__ : Any ):
nonlocal batch_sizes
batch_sizes.append(UpperCamelCase__ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(UpperCamelCase__ , [128, 64, 32, 16, 8] )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ):
nonlocal batch_sizes
batch_sizes.append(UpperCamelCase__ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = mock_training_loop_function('''hello''' )
self.assertListEqual(UpperCamelCase__ , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def __A ( self : List[Any] ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(UpperCamelCase__ : str ):
pass
with self.assertRaises(UpperCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def __A ( self : str ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCamelCase__ : Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(UpperCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def __A ( self : List[str] ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(UpperCamelCase__ ) as cm:
mock_training_loop_function(128 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCamelCase__ : List[str] ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(UpperCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = torch.cuda.memory_allocated()
SCREAMING_SNAKE_CASE : Optional[int] = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = release_memory(UpperCamelCase__ )
self.assertEqual(torch.cuda.memory_allocated() , UpperCamelCase__ )
| 34
|
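# [Added sketch of the halving strategy the tests above exercise; a simplified
# stand-in, not the accelerate implementation]
import functools

def find_executable_batch_size_sketch(starting_batch_size: int = 128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as e:
                    if "out of memory" in str(e).lower():
                        batch_size //= 2  # halve and retry on OOM
                    else:
                        raise
            raise RuntimeError("No executable batch size found, reached zero.")
        return wrapper
    return decorator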
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=64 , UpperCamelCase__ : Optional[Any]=4_8000 , UpperCamelCase__ : Tuple=480 , UpperCamelCase__ : Union[str, Any]=10 , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : int=False , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 1_4000 , UpperCamelCase__ : int = None , UpperCamelCase__ : str = "fusion" , UpperCamelCase__ : str = "repeatpad" , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = top_db
SCREAMING_SNAKE_CASE : Union[str, Any] = truncation
SCREAMING_SNAKE_CASE : str = padding
SCREAMING_SNAKE_CASE : List[Any] = fft_window_size
SCREAMING_SNAKE_CASE : Tuple = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE : List[str] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = max_length_s
SCREAMING_SNAKE_CASE : Tuple = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE : List[Any] = sampling_rate
SCREAMING_SNAKE_CASE : List[str] = frequency_min
SCREAMING_SNAKE_CASE : Any = frequency_max
SCREAMING_SNAKE_CASE : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm=UpperCamelCase__ , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm='''slaney''' , mel_scale='''slaney''' , )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __A ( self : Optional[int] , UpperCamelCase__ : np.array , UpperCamelCase__ : Optional[np.array] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = spectrogram(
UpperCamelCase__ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase__ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : Any = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE : List[Any] = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE : int = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE : Optional[int] = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE : Optional[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE : Tuple = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE : str = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase__ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __A ( self : Dict , UpperCamelCase__ : np.array , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ ) - max_length
SCREAMING_SNAKE_CASE : Dict = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : List[str] = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
SCREAMING_SNAKE_CASE : List[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = False
else:
SCREAMING_SNAKE_CASE : str = self._random_mel_fusion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
SCREAMING_SNAKE_CASE : List[str] = False
# "repeat" is the only new possible value for padding: the audio is repeated before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE : Tuple = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Any = np.stack(np.tile(UpperCamelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE : List[Any] = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.stack(np.tile(UpperCamelCase__ , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.pad(UpperCamelCase__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE : List[Any] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE : List[str] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : str = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE : List[str] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : List[str] = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Any = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ):
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(UpperCamelCase__ , dtype=np.floataa )
elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(UpperCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE : int = [
self._get_input_mel(UpperCamelCase__ , max_length if max_length else self.nb_max_samples , UpperCamelCase__ , UpperCamelCase__ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase__ )
is_longer.append(UpperCamelCase__ )
if truncation == "fusion" and sum(UpperCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = True
if isinstance(input_mel[0] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE : Optional[Any] = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE : int = BatchFeature(UpperCamelCase__ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : int = input_features.convert_to_tensors(UpperCamelCase__ )
return input_features
| 34
| 1
|
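# [Added usage sketch; constructing the extractor from its defaults is an assumption,
# not taken from the original source]
import numpy as np
from transformers import ClapFeatureExtractor

extractor = ClapFeatureExtractor()                      # library defaults (48 kHz)
waveform = np.random.randn(48_000).astype(np.float32)  # one second of audio
batch = extractor(waveform, sampling_rate=48_000, return_tensors="np")
print(batch["input_features"].shape, batch["is_longer"])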
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
__UpperCamelCase : Union[str, Any] = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
__UpperCamelCase : List[Any] = 'hopper-medium-v2'
__UpperCamelCase : Optional[int] = gym.make(env_name)
__UpperCamelCase : int = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
__UpperCamelCase : str = env.reset()
__UpperCamelCase : Optional[Any] = 0
__UpperCamelCase : Optional[int] = 0
__UpperCamelCase : Optional[Any] = 1000
__UpperCamelCase : Dict = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
__UpperCamelCase : Optional[int] = pipeline(obs, planning_horizon=32)
# execute action in environment
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Union[str, Any] = env.step(denorm_actions)
__UpperCamelCase : Dict = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
f""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
__UpperCamelCase : Optional[Any] = next_observation
except KeyboardInterrupt:
pass
print(f"""Total reward: {total_reward}""")
| 34
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """layoutlmv3"""
def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any]=5_0265 , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Tuple=3072 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=512 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[Any]=1E-5 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : int=0 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[str]=1024 , UpperCamelCase__ : str=128 , UpperCamelCase__ : str=128 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Optional[Any]=64 , UpperCamelCase__ : Dict=256 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Dict=224 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
vocab_size=UpperCamelCase__ , hidden_size=UpperCamelCase__ , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , intermediate_size=UpperCamelCase__ , hidden_act=UpperCamelCase__ , hidden_dropout_prob=UpperCamelCase__ , attention_probs_dropout_prob=UpperCamelCase__ , max_position_embeddings=UpperCamelCase__ , type_vocab_size=UpperCamelCase__ , initializer_range=UpperCamelCase__ , layer_norm_eps=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : List[str] = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : str = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Union[str, Any] = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Union[str, Any] = text_embed
SCREAMING_SNAKE_CASE : List[str] = visual_embed
SCREAMING_SNAKE_CASE : Optional[Any] = input_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : List[Any] = classifier_dropout
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = version.parse("""1.12""")
@property
def __A ( self : str ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __A ( self : int ):
'''simple docstring'''
return 1E-5
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Optional[Any] , UpperCamelCase__ : "ProcessorMixin" , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , UpperCamelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : List[Any] = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : Any = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = dict(
processor(
UpperCamelCase__ , text=UpperCamelCase__ , boxes=UpperCamelCase__ , return_tensors=UpperCamelCase__ , ) )
return inputs
| 34
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : Dict = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = FunnelTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[Any] = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE : int = '''unwanted, running'''
return input_text, output_text
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
SCREAMING_SNAKE_CASE : int = tokenizer('''UNwant\u00E9d,running''' )
SCREAMING_SNAKE_CASE : Optional[Any] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
| 34
| 1
|
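# [Added note with an illustrative helper; not part of the original test file]
# Funnel marks the leading <cls> with token type 2 rather than BERT's 0, so a pair
# encodes as [2] + [0] * len_a + [1] * len_b, which is exactly what the test checks:
def funnel_token_type_ids(len_a: int, len_b: int = 0) -> list:
    # <cls> gets type 2; the first sentence (and its <sep>) type 0; the second type 1
    return [2] + [0] * len_a + [1] * len_b

assert funnel_token_type_ids(3, 2) == [2, 0, 0, 0, 1, 1]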
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__UpperCamelCase : Optional[int] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
__UpperCamelCase : str = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def A ( ):
SCREAMING_SNAKE_CASE : Optional[Any] = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
SCREAMING_SNAKE_CASE : Optional[Any] = bs[:]
SCREAMING_SNAKE_CASE : int = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowercase )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE : Optional[Any] = [chr(_lowercase ) for n in cs]
return dict(zip(_lowercase , _lowercase ) )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = set()
SCREAMING_SNAKE_CASE : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE : int = char
return pairs
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]="replace" , UpperCamelCase__ : Any="<s>" , UpperCamelCase__ : Optional[int]="</s>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Any="<s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : str="<pad>" , UpperCamelCase__ : int="<mask>" , UpperCamelCase__ : List[str]=False , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
SCREAMING_SNAKE_CASE : Dict = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
SCREAMING_SNAKE_CASE : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
SCREAMING_SNAKE_CASE : Any = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
# The mask token behaves like a normal word, i.e. it includes the space before it
SCREAMING_SNAKE_CASE : Tuple = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
with open(UpperCamelCase__ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : Optional[int] = json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE : Union[str, Any] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE : Union[str, Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE : int = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase__ , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE : Optional[int] = merges_handle.read().split('''\n''' )[1:-1]
SCREAMING_SNAKE_CASE : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE : str = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE : Dict = {}
SCREAMING_SNAKE_CASE : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE : str = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.encoder )
def __A ( self : Dict ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __A ( self : int , UpperCamelCase__ : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE : str = tuple(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = get_pairs(UpperCamelCase__ )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE : Dict = min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = bigram
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : str = 0
while i < len(UpperCamelCase__ ):
try:
SCREAMING_SNAKE_CASE : Optional[int] = word.index(UpperCamelCase__ , UpperCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE : List[str] = j
if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE : Optional[int] = tuple(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = new_word
if len(UpperCamelCase__ ) == 1:
break
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = get_pairs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = ''' '''.join(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = word
return word
def __A ( self : List[Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = []
for token in re.findall(self.pat , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Dict = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase__ ).split(''' ''' ) )
return bpe_tokens
def __A ( self : Dict , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def __A ( self : Optional[int] , UpperCamelCase__ : str ):
'''simple docstring'''
return self.decoder.get(UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''.join(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __A ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Dict = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + '''\n''' )
SCREAMING_SNAKE_CASE : Optional[int] = 0
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
SCREAMING_SNAKE_CASE : List[str] = token_index
writer.write(''' '''.join(UpperCamelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __A ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
def __A ( self : Tuple , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=False , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase__ ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE : str = ''' ''' + text
return (text, kwargs)
def __A ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def __A ( self : List[Any] , UpperCamelCase__ : "Conversation" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to prepend a space, as is done within Blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = ''' '''.join(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = self.encode(UpperCamelCase__ )
if len(UpperCamelCase__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 34
|
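# [Added usage sketch of the BPE helpers above, assuming the upstream names
# bytes_to_unicode / get_pairs] get_pairs works on symbol tuples; bpe() then
# repeatedly merges the lowest-ranked adjacent pair until none remains:
word = ("h", "e", "l", "l", "o")
pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
assert pairs == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}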
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset
SCREAMING_SNAKE_CASE : Optional[Any] = process
SCREAMING_SNAKE_CASE : Union[str, Any] = params
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : List[str] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.dataset[i]
SCREAMING_SNAKE_CASE : Optional[int] = self.process(UpperCamelCase__ , **self.params )
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = loader
SCREAMING_SNAKE_CASE : List[Any] = infer
SCREAMING_SNAKE_CASE : int = params
if loader_batch_size == 1:
            # Let's spare some time by deactivating it altogether
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = loader_batch_size
# Internal bookkeeping
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = None
def __len__( self : int ):
'''simple docstring'''
return len(self.loader )
def __iter__( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is a simple tensor, just fetch the slice
SCREAMING_SNAKE_CASE : Optional[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Convert ModelOutput to tuple first
SCREAMING_SNAKE_CASE : Dict = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                # Those are stored as lists of tensors, so they need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
                # This can happen for optional data that gets passed around
SCREAMING_SNAKE_CASE : int = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
SCREAMING_SNAKE_CASE : Tuple = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
SCREAMING_SNAKE_CASE : Any = self._loader_batch_data.__class__(UpperCamelCase__ )
self._loader_batch_index += 1
return result
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
SCREAMING_SNAKE_CASE : Tuple = next(self.iterator )
SCREAMING_SNAKE_CASE : List[Any] = self.infer(UpperCamelCase__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Optional[int] = processed
else:
SCREAMING_SNAKE_CASE : int = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : int = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
                # could be the last batch, so we can't unroll as many
                # elements.
SCREAMING_SNAKE_CASE : List[Any] = observed_batch_size
# Setting internal index to unwrap the batch
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : int = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
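# Hedged standalone sketch (illustrative, not part of the pipeline classes)
# of the batch-unrolling idea above: a batched model output is sliced back
# into per-item results, each keeping a leading batch dimension of 1.
def _unroll_example(batch):
    size = next(iter(batch.values())).shape[0]
    for i in range(size):
        # unsqueeze(0) keeps every slice shaped like a batch of one.
        yield {k: v[i].unsqueeze(0) for k, v in batch.items()}


# for item in _unroll_example({"logits": torch.randn(4, 3)}):
#     print(item["logits"].shape)  # torch.Size([1, 3])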
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = iter(self.loader )
SCREAMING_SNAKE_CASE : List[Any] = None
return self
def __A ( self : List[str] ):
'''simple docstring'''
if self.subiterator is None:
SCREAMING_SNAKE_CASE : Dict = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
SCREAMING_SNAKE_CASE : Any = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated over.
            #
            # Another way to look at it: we're basically flattening lists of lists
            # into a single list, but with generators.
SCREAMING_SNAKE_CASE : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
SCREAMING_SNAKE_CASE : Union[str, Any] = next(self.subiterator )
return processed
class lowercase__ ( UpperCamelCase_):
def __iter__( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Tuple = self.loader_batch_item()
SCREAMING_SNAKE_CASE : Any = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
while not is_last:
SCREAMING_SNAKE_CASE : Any = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Tuple = processed
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : List[str] = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
                    # could be the last batch, so we can't unroll as many
                    # elements.
SCREAMING_SNAKE_CASE : List[str] = observed_batch_size
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : str = 0
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Any = self.loader_batch_item()
SCREAMING_SNAKE_CASE : List[Any] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
else:
SCREAMING_SNAKE_CASE : int = processed
SCREAMING_SNAKE_CASE : List[str] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
return accumulator
class lowercase__ ( UpperCamelCase_):
def __init__( self : Optional[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = dataset
SCREAMING_SNAKE_CASE : Dict = key
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Dict , UpperCamelCase__ : Tuple ):
'''simple docstring'''
return self.dataset[i][self.key]
class lowercase__ ( UpperCamelCase_):
def __init__( self : List[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataset
SCREAMING_SNAKE_CASE : List[str] = keya
SCREAMING_SNAKE_CASE : Tuple = keya
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
from __future__ import annotations
class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
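# Hedged extra check (not part of the original module): a node with only
# one child violates the full-binary-tree property.
_lopsided = Node(1)
_lopsided.left = Node(2)
assert not is_full_binary_tree(_lopsided)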
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """deberta-v2"""
def __init__( self : Optional[Any] , UpperCamelCase__ : Any=12_8100 , UpperCamelCase__ : Optional[int]=1536 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : List[str]=24 , UpperCamelCase__ : Tuple=6144 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : List[Any]=1E-7 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=-1 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : str="gelu" , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = relative_attention
SCREAMING_SNAKE_CASE : Optional[Any] = max_relative_positions
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCamelCase__ ) == str:
SCREAMING_SNAKE_CASE : Optional[int] = [x.strip() for x in pos_att_type.lower().split('''|''' )]
SCREAMING_SNAKE_CASE : Any = pos_att_type
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''pooler_hidden_size''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pooler_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = pooler_hidden_act
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Dict , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
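# Hedged standalone illustration (not part of the config classes) of the
# backwards-compatibility branch above: a "|"-separated pos_att_type
# string is normalized into a list of lowercased entries.
assert [x.strip() for x in "P2C|C2P".lower().split("|")] == ["p2c", "c2p"]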
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
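# Hedged second example (not in the original): in the path graph 0-1-2 the
# middle vertex is the only articulation point, so this prints 1.
compute_ap({0: [1], 1: [0, 2], 2: [1]})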
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Any = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Any = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {int(_lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE : Optional[int] = BitConfig(
conv_layer=_lowercase , num_labels=1_000 , idalabel=_lowercase , labelaid=_lowercase , )
return config
def A ( _lowercase ):
if "stem.conv" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
SCREAMING_SNAKE_CASE : str = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''bit.encoder.''' + name
return name
def A ( ):
SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def A ( _lowercase , _lowercase , _lowercase=False ):
SCREAMING_SNAKE_CASE : List[Any] = get_config(_lowercase )
# load original model from timm
SCREAMING_SNAKE_CASE : Optional[Any] = create_model(_lowercase , pretrained=_lowercase )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE : Optional[int] = timm_model.state_dict()
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Dict = state_dict.pop(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = val.squeeze() if '''head''' in key else val
# load HuggingFace model
SCREAMING_SNAKE_CASE : str = BitForImageClassification(_lowercase )
model.eval()
model.load_state_dict(_lowercase )
# create image processor
SCREAMING_SNAKE_CASE : Optional[Any] = create_transform(**resolve_data_config({} , model=_lowercase ) )
SCREAMING_SNAKE_CASE : List[str] = transform.transforms
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE : Tuple = BitImageProcessor(
do_resize=_lowercase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowercase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = transform(_lowercase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : Optional[int] = processor(_lowercase , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_lowercase , _lowercase )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE : List[Any] = timm_model(_lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
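# Hedged usage note (the script filename and paths are illustrative):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-resnetv2-50
# The dump folder can then be reloaded with
# BitForImageClassification.from_pretrained("./bit-resnetv2-50") and
# BitImageProcessor.from_pretrained("./bit-resnetv2-50").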
import warnings
from .generation import TFGenerationMixin
class lowercase__ ( UpperCamelCase_):
# warning at import time
warnings.warn(
"""Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
"""be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , UpperCamelCase_ , )
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__UpperCamelCase : str = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase : int = logging.getLogger()
def A ( ):
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('''-f''' )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
return args.f
def A ( _lowercase , _lowercase="eval" ):
SCREAMING_SNAKE_CASE : Dict = os.path.join(_lowercase , f"""{split}_results.json""" )
if os.path.exists(_lowercase ):
with open(_lowercase , '''r''' ) as f:
return json.load(_lowercase )
raise ValueError(f"""can't find {path}""" )
__UpperCamelCase : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase__ ( UpperCamelCase_):
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Tuple = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_glue.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : str = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_clm_flax.main()
SCREAMING_SNAKE_CASE : Dict = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_summarization_flax.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Dict = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_mlm_flax.main()
SCREAMING_SNAKE_CASE : List[Any] = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_ta_mlm_flax.main()
SCREAMING_SNAKE_CASE : Optional[int] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Any = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_ner.main()
SCREAMING_SNAKE_CASE : List[str] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_qa.main()
SCREAMING_SNAKE_CASE : str = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
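# Hedged standalone sketch (not part of the test class) of the
# argv-patching pattern used above: argparse reads sys.argv, so patching
# sys.argv lets a test drive any script's main() with synthetic flags.
def _toy_main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_dir")
    return parser.parse_args().output_dir


with patch.object(sys, "argv", ["prog", "--output_dir", "/tmp/out"]):
    assert _toy_main() == "/tmp/out"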
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = BertJapaneseTokenizer
UpperCamelCase_ = False
UpperCamelCase_ = True
def __A ( self : List[Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A ( self : List[Any] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''こんにちは、世界。 \nこんばんは、世界。'''
SCREAMING_SNAKE_CASE : List[str] = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def __A ( self : Dict , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.get_input_output_texts(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
return text, ids
def __A ( self : int ):
'''simple docstring'''
pass # TODO add if relevant
def __A ( self : Tuple ):
'''simple docstring'''
pass # TODO add if relevant
def __A ( self : List[str] ):
'''simple docstring'''
pass # TODO add if relevant
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(UpperCamelCase__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = '''こんにちは、世界。\nこんばんは、世界。'''
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCamelCase__ , '''wb''' ) as handle:
pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , '''rb''' ) as handle:
SCREAMING_SNAKE_CASE : Union[str, Any] = pickle.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_new.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __A ( self : Dict ):
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE : List[str] = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __A ( self : Dict ):
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE : Optional[int] = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = MecabTokenizer(do_lower_case=UpperCamelCase__ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __A ( self : List[str] ):
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE : Dict = MecabTokenizer(
do_lower_case=UpperCamelCase__ , normalize_text=UpperCamelCase__ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
            # if the dictionary doesn't exist on the system, the constructor above raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = MecabTokenizer(normalize_text=UpperCamelCase__ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = '''こんにちは、世界。\nこんばんは、世界。'''
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCamelCase__ , '''wb''' ) as handle:
pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , '''rb''' ) as handle:
SCREAMING_SNAKE_CASE : str = pickle.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_new.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@require_sudachi
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SudachiTokenizer(do_lower_case=UpperCamelCase__ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = SudachiTokenizer(normalize_text=UpperCamelCase__ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = SudachiTokenizer(trim_whitespace=UpperCamelCase__ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = '''こんにちは、世界。\nこんばんは、世界。'''
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCamelCase__ , '''wb''' ) as handle:
pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , '''rb''' ) as handle:
SCREAMING_SNAKE_CASE : List[Any] = pickle.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = tokenizer_new.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@require_jumanpp
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = JumanppTokenizer(do_lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = JumanppTokenizer(normalize_text=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = JumanppTokenizer(trim_whitespace=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
SCREAMING_SNAKE_CASE : Any = {}
for i, token in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = i
SCREAMING_SNAKE_CASE : Optional[int] = WordpieceTokenizer(vocab=UpperCamelCase__ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
SCREAMING_SNAKE_CASE : str = tokenizer.subword_tokenizer
SCREAMING_SNAKE_CASE : Tuple = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(UpperCamelCase__ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
SCREAMING_SNAKE_CASE : Tuple = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(UpperCamelCase__ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = BertJapaneseTokenizer
UpperCamelCase_ = False
def __A ( self : Dict ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : str = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A ( self : Optional[int] , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = '''こんにちは、世界。 \nこんばんは、世界。'''
SCREAMING_SNAKE_CASE : int = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def __A ( self : Optional[int] ):
'''simple docstring'''
pass # TODO add if relevant
def __A ( self : str ):
'''simple docstring'''
pass # TODO add if relevant
def __A ( self : List[Any] ):
'''simple docstring'''
pass # TODO add if relevant
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
UpperCamelCase__ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
SCREAMING_SNAKE_CASE : Optional[Any] = {}
for i, token in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = i
SCREAMING_SNAKE_CASE : Any = CharacterTokenizer(vocab=UpperCamelCase__ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowercase__ ( unittest.TestCase):
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = '''cl-tohoku/bert-base-japanese'''
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
class lowercase__ ( unittest.TestCase):
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(UpperCamelCase__ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
SCREAMING_SNAKE_CASE : Dict = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(UpperCamelCase__ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
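# Hedged standalone sketch (not part of the test suite) of the greedy
# longest-match-first rule that WordpieceTokenizer applies above: the
# longest vocab prefix is taken, and continuations are prefixed with "##".
def _greedy_wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:
            return [unk]
        start = end
    return pieces


assert _greedy_wordpiece("こんばんは", {"こん", "##ばんは"}) == ["こん", "##ばんは"]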
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor-like nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class lowercase__ ( unittest.TestCase):
def __init__( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=7 , UpperCamelCase__ : Any=400 , UpperCamelCase__ : List[str]=2000 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : str=30 , UpperCamelCase__ : Tuple=4_4100 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : str = min_seq_length
SCREAMING_SNAKE_CASE : Dict = max_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Optional[Any] = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[int] = feature_size
SCREAMING_SNAKE_CASE : Tuple = num_audio_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = chunk_length
SCREAMING_SNAKE_CASE : str = sampling_rate
def __A ( self : Optional[Any] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __A ( self : Tuple , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
def _flatten(UpperCamelCase__ : str ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = TvltFeatureExtractor
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = TvltFeatureExtractionTester(self )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''spectrogram_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''feature_size''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''num_audio_channels''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''hop_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''chunk_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''sampling_rate''' ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Optional[int] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : int = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Any = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(
UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=UpperCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Dict = np.asarray(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __A ( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Dict = ds.sort('''id''' ).select(range(UpperCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : int = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''pt''' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
SCREAMING_SNAKE_CASE : str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCamelCase__ , atol=1E-4 ) )
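# Hedged standalone sketch (not part of the test class) of the
# serialization-comparison pattern above: array-valued entries are popped
# and compared with np.allclose, everything else with plain equality.
_d1 = {"feature_size": 128, "mel_filters": np.zeros((2, 2))}
_d2 = {"feature_size": 128, "mel_filters": np.zeros((2, 2))}
_m1, _m2 = _d1.pop("mel_filters"), _d2.pop("mel_filters")
assert np.allclose(_m1, _m2) and _d1 == _d2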
| 34
| 1
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__UpperCamelCase : str = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase : int = logging.getLogger()
def A ( ):
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('''-f''' )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
return args.f
def A ( _lowercase , _lowercase="eval" ):
SCREAMING_SNAKE_CASE : Dict = os.path.join(_lowercase , f"""{split}_results.json""" )
if os.path.exists(_lowercase ):
with open(_lowercase , '''r''' ) as f:
return json.load(_lowercase )
raise ValueError(f"""can't find {path}""" )
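# Loads the `{split}_results.json` file that the example scripts write into
# their output directory and returns it as a dict; raises if the file is missing.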
__UpperCamelCase : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase__ ( UpperCamelCase_):
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Tuple = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_glue.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : str = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_clm_flax.main()
SCREAMING_SNAKE_CASE : Dict = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_summarization_flax.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Dict = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_mlm_flax.main()
SCREAMING_SNAKE_CASE : List[Any] = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_ta_mlm_flax.main()
SCREAMING_SNAKE_CASE : Optional[int] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Any = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_ner.main()
SCREAMING_SNAKE_CASE : List[str] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_qa.main()
SCREAMING_SNAKE_CASE : str = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 34
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_):
UpperCamelCase_ = 1
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 1000 , UpperCamelCase__ : Optional[Union[np.ndarray, List[float]]] = None ):
'''simple docstring'''
self.set_timesteps(UpperCamelCase__ )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE : str = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
SCREAMING_SNAKE_CASE : Tuple = 4
# running values
SCREAMING_SNAKE_CASE : int = []
def __A ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = num_inference_steps
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
SCREAMING_SNAKE_CASE : Tuple = torch.cat([steps, torch.tensor([0.0] )] )
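# `steps` now runs linearly from 1 down to 0 inclusive (num_inference_steps + 1
# values), so the final denoising step lands exactly on t = 0.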
if self.config.trained_betas is not None:
SCREAMING_SNAKE_CASE : int = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sin(steps * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE : Dict = (1.0 - self.betas**2) ** 0.5
SCREAMING_SNAKE_CASE : Optional[Any] = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
SCREAMING_SNAKE_CASE : List[str] = timesteps.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = []
def __A ( self : Tuple , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
SCREAMING_SNAKE_CASE : Optional[int] = (self.timesteps == timestep).nonzero().item()
SCREAMING_SNAKE_CASE : Union[str, Any] = timestep_index + 1
SCREAMING_SNAKE_CASE : int = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase__ )
if len(self.ets ) == 1:
SCREAMING_SNAKE_CASE : Dict = self.ets[-1]
elif len(self.ets ) == 2:
SCREAMING_SNAKE_CASE : Optional[int] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
SCREAMING_SNAKE_CASE : str = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
SCREAMING_SNAKE_CASE : Optional[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
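# The weights above are the standard 1st- to 4th-order Adams-Bashforth linear
# multistep coefficients; once four model outputs have been collected, every
# subsequent step uses the 4th-order rule (55, -59, 37, -9) / 24.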
SCREAMING_SNAKE_CASE : Optional[int] = self._get_prev_sample(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : torch.FloatTensor , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return sample
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.alphas[timestep_index]
SCREAMING_SNAKE_CASE : List[str] = self.betas[timestep_index]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.alphas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Tuple = self.betas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Dict = (sample - sigma * ets) / max(UpperCamelCase__ , 1E-8 )
SCREAMING_SNAKE_CASE : Optional[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
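# A minimal sketch of how a scheduler like this is typically driven in a
# denoising loop (the `unet` call and the 1x3x64x64 sample shape are
# illustrative assumptions, not part of this file):
#
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 64, 64)
#   for t in scheduler.timesteps:
#       model_output = unet(sample, t).sample
#       sample = scheduler.step(model_output, t, sample).prev_sample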
| 34
| 1
|
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
__UpperCamelCase : Optional[int] = 'pytorch_model.bin'
__UpperCamelCase : Union[str, Any] = 'pytorch_model.bin.index.json'
__UpperCamelCase : Dict = 'adapter_config.json'
__UpperCamelCase : Optional[int] = 'adapter_model.bin'
__UpperCamelCase : List[str] = 'adapter_model.safetensors'
__UpperCamelCase : Optional[Any] = 'tf_model.h5'
__UpperCamelCase : Optional[int] = 'tf_model.h5.index.json'
__UpperCamelCase : Any = 'model.ckpt'
__UpperCamelCase : Tuple = 'flax_model.msgpack'
__UpperCamelCase : Optional[Any] = 'flax_model.msgpack.index.json'
__UpperCamelCase : Optional[int] = 'model.safetensors'
__UpperCamelCase : int = 'model.safetensors.index.json'
__UpperCamelCase : List[Any] = 'config.json'
__UpperCamelCase : List[str] = 'preprocessor_config.json'
__UpperCamelCase : int = FEATURE_EXTRACTOR_NAME
__UpperCamelCase : int = 'generation_config.json'
__UpperCamelCase : Optional[Any] = 'modelcard.json'
__UpperCamelCase : List[Any] = '▁'
__UpperCamelCase : Optional[Any] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
__UpperCamelCase : str = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
__UpperCamelCase : Union[str, Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
__UpperCamelCase : Dict = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def A ( _lowercase ):
if version.parse(__version__ ) < version.parse(_lowercase ):
if "dev" in min_version:
SCREAMING_SNAKE_CASE : Dict = (
'''This example requires a source install from HuggingFace Transformers (see '''
'''`https://huggingface.co/docs/transformers/installation#install-from-source`),'''
)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""This example requires a minimum version of {min_version},"""
error_message += f""" but the version found is {__version__}.\n"""
raise ImportError(
error_message
+ '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '''
'''versions of HuggingFace Transformers.''' )
| 34
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = IFPipeline
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __A ( self : Tuple ):
'''simple docstring'''
return self._get_dummy_components()
def __A ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : int=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : List[str] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __A ( self : Any ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __A ( self : List[Any] ):
'''simple docstring'''
self._test_save_load_local()
def __A ( self : List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE : Tuple = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def A ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
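# Emptying the cache and resetting the peak-memory statistics here means every
# `torch.cuda.max_memory_allocated()` reading in the tests above reflects only
# the pipeline stage run since the previous call to this helper.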
| 34
| 1
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__UpperCamelCase : Optional[int] = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class lowercase__ ( tr.AbstractTransform):
def __init__( self : Dict , UpperCamelCase__ : str = " " ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = sentence_delimiter
def __A ( self : int , UpperCamelCase__ : str ):
'''simple docstring'''
return list(UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = []
for sent_idx, sentence in enumerate(UpperCamelCase__ ):
chars.extend(self.process_string(UpperCamelCase__ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(UpperCamelCase__ ) - 1:
chars.append(self.sentence_delimiter )
return chars
__UpperCamelCase : Dict = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__UpperCamelCase : List[str] = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__UpperCamelCase : Any = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__UpperCamelCase : List[str] = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
__UpperCamelCase : Optional[Any] = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowercase__ ( datasets.Metric):
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def __A ( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]=False ):
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
UpperCamelCase__ , UpperCamelCase__ , truth_transform=UpperCamelCase__ , hypothesis_transform=UpperCamelCase__ , )["wer"]
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : int = 0
for prediction, reference in zip(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = jiwer.compute_measures(
UpperCamelCase__ , UpperCamelCase__ , truth_transform=UpperCamelCase__ , hypothesis_transform=UpperCamelCase__ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
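# Worked example: for the prediction "hello" against the reference "hallo",
# jiwer counts 1 substitution, 0 deletions/insertions and 4 hits at the
# character level, so CER = 1 / (1 + 0 + 4) = 0.2.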
| 34
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__UpperCamelCase : int = logging.get_logger(__name__)
def A ( _lowercase , _lowercase , _lowercase , _lowercase ):
def constraint_to_multiple_of(_lowercase , _lowercase , _lowercase=0 , _lowercase=None ):
SCREAMING_SNAKE_CASE : int = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE : Dict = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE : Optional[Any] = math.ceil(val / multiple ) * multiple
return x
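# i.e. round to the nearest multiple of `multiple`, falling back to floor if
# that overshoots max_val and to ceil if it undershoots min_val.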
SCREAMING_SNAKE_CASE : Optional[Any] = (output_size, output_size) if isinstance(_lowercase , _lowercase ) else output_size
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = get_image_size(_lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE : Dict = output_height / input_height
SCREAMING_SNAKE_CASE : Optional[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE : List[Any] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE : List[Any] = scale_height
SCREAMING_SNAKE_CASE : List[str] = constraint_to_multiple_of(scale_height * input_height , multiple=_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = constraint_to_multiple_of(scale_width * input_width , multiple=_lowercase )
return (new_height, new_width)
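# Worked example (values assumed for illustration): for a 480x640 input with
# output_size=(384, 384), keep_aspect_ratio=True and multiple=32, the height
# scale 0.8 deviates least from 1 and wins, giving 0.8 * 480 = 384 and
# 0.8 * 640 = 512 (both already multiples of 32), so (384, 512) is returned.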
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""pixel_values"""]
def __init__( self : int , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'''height''': 384, '''width''': 384}
SCREAMING_SNAKE_CASE : Any = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : str = keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(
UpperCamelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase__ , multiple=UpperCamelCase__ , )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Dict = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Dict = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Any = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Tuple] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[int] = []
for idx in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
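# A minimal sketch of calling this post-processing step (the `model` /
# `image_processor` names and the PIL `image.size[::-1]` target are
# illustrative assumptions):
#
#   outputs = model(**inputs)
#   maps = image_processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[image.size[::-1]]
#   )  # one (H, W) tensor of class indices per input image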
| 34
| 1
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__UpperCamelCase : Dict = random.Random()
def A ( _lowercase , _lowercase=1.0 , _lowercase=None , _lowercase=None ):
if rng is None:
SCREAMING_SNAKE_CASE : Any = global_rng
SCREAMING_SNAKE_CASE : int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
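# e.g. floats_list((2, 3)) yields a 2-element list of 3 scaled random floats
# each; passing an explicit seeded `rng` makes the output deterministic.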
class lowercase__ ( unittest.TestCase):
def __init__( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=7 , UpperCamelCase__ : Any=400 , UpperCamelCase__ : List[str]=2000 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : str=30 , UpperCamelCase__ : Tuple=4_4100 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : str = min_seq_length
SCREAMING_SNAKE_CASE : Dict = max_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Optional[Any] = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[int] = feature_size
SCREAMING_SNAKE_CASE : Tuple = num_audio_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = chunk_length
SCREAMING_SNAKE_CASE : str = sampling_rate
def __A ( self : Optional[Any] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __A ( self : Tuple , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
def _flatten(UpperCamelCase__ : str ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = TvltFeatureExtractor
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = TvltFeatureExtractionTester(self )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''spectrogram_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''feature_size''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''num_audio_channels''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''hop_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''chunk_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''sampling_rate''' ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Optional[int] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : int = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Any = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(
UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=UpperCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Dict = np.asarray(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __A ( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Dict = ds.sort('''id''' ).select(range(UpperCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : int = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''pt''' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
SCREAMING_SNAKE_CASE : str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCamelCase__ , atol=1E-4 ) )
| 34
|
import random
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = [], [], []
for element in data:
if element < pivot:
less.append(_lowercase )
elif element > pivot:
greater.append(_lowercase )
else:
equal.append(_lowercase )
return less, equal, greater
def A ( _lowercase , _lowercase ):
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(_lowercase ) or index < 0:
return None
SCREAMING_SNAKE_CASE : Dict = items[random.randint(0 , len(_lowercase ) - 1 )]
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = _partition(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = len(_lowercase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_lowercase , _lowercase )
# must be in larger
else:
return quick_select(_lowercase , index - (m + count) )
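# Usage sketch (illustrative values): quick_select returns the element that
# would sit at `index` if the list were sorted, in expected O(n) time, e.g.
#   quick_select([2, 4, 5, 7, 899, 54, 32], 5)  ->  54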
| 34
| 1
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__ :
def __init__( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any]=13 , UpperCamelCase__ : List[Any]=7 , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : List[Any]=99 , UpperCamelCase__ : Union[str, Any]=24 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : str=6 , UpperCamelCase__ : Optional[Any]=37 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : int=512 , UpperCamelCase__ : Dict=16 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : str=None , UpperCamelCase__ : str=1000 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : List[str] = batch_size
SCREAMING_SNAKE_CASE : Dict = seq_length
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : List[str] = use_input_mask
SCREAMING_SNAKE_CASE : Dict = use_token_type_ids
SCREAMING_SNAKE_CASE : List[str] = use_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = num_labels
SCREAMING_SNAKE_CASE : Dict = scope
SCREAMING_SNAKE_CASE : Dict = range_bbox
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE : Tuple = bbox[i, j, 3]
SCREAMING_SNAKE_CASE : str = bbox[i, j, 1]
SCREAMING_SNAKE_CASE : int = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE : Dict = bbox[i, j, 2]
SCREAMING_SNAKE_CASE : Union[str, Any] = bbox[i, j, 0]
SCREAMING_SNAKE_CASE : Union[str, Any] = t
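# After the swaps above every box satisfies x0 <= x1 and y0 <= y1, i.e.
# (bbox[..., 0], bbox[..., 1]) is the top-left corner and
# (bbox[..., 2], bbox[..., 3]) the bottom-right one.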
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __A ( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = LiltModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCamelCase__ , bbox=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = model(UpperCamelCase__ , bbox=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = model(UpperCamelCase__ , bbox=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : Any = LiltForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = LiltForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
SCREAMING_SNAKE_CASE : List[str] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase_ = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def __A ( self : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
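# Presumably the pipeline-test skip hook: all pipeline tests are skipped because the generic pipelines do not supply the bbox inputs LiLT requires.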
return True
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = LiltModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def __A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : Any = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
@slow
def __A ( self : str ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Union[str, Any] = LiltModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_torch
@slow
class lowercase__ ( unittest.TestCase):
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[1, 2]] , device=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=UpperCamelCase__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(input_ids=UpperCamelCase__ , bbox=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = torch.Size([1, 2, 768] )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=UpperCamelCase__ , )
self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , UpperCamelCase__ , atol=1E-3 ) )
| 34
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
# TODO Update this
__UpperCamelCase : List[str] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """esm"""
def __init__( self : Tuple , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Any=768 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : str=12 , UpperCamelCase__ : Optional[int]=3072 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Union[str, Any]=1026 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = position_embedding_type
SCREAMING_SNAKE_CASE : Any = use_cache
SCREAMING_SNAKE_CASE : Dict = emb_layer_norm_before
SCREAMING_SNAKE_CASE : List[str] = token_dropout
SCREAMING_SNAKE_CASE : List[Any] = is_folding_model
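# Folding (ESMFold) checkpoints carry a nested folding config and an explicit vocabulary list; plain ESM encoders leave both unset.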
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
SCREAMING_SNAKE_CASE : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = EsmFoldConfig(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
SCREAMING_SNAKE_CASE : Optional[int] = get_default_vocab_list()
else:
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_list
else:
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.esmfold_config.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = None
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = 0
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Optional[int] ):
'''simple docstring'''
if self.trunk is None:
SCREAMING_SNAKE_CASE : Optional[Any] = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = TrunkConfig(**self.trunk )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = asdict(self )
SCREAMING_SNAKE_CASE : Tuple = self.trunk.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 48
UpperCamelCase_ = 1_024
UpperCamelCase_ = 128
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = False
UpperCamelCase_ = 4
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Any ):
'''simple docstring'''
if self.structure_module is None:
SCREAMING_SNAKE_CASE : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
SCREAMING_SNAKE_CASE : Dict = self.sequence_state_dim // self.sequence_head_width
SCREAMING_SNAKE_CASE : Tuple = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'''
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'''
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = asdict(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.structure_module.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 384
UpperCamelCase_ = 128
UpperCamelCase_ = 16
UpperCamelCase_ = 128
UpperCamelCase_ = 12
UpperCamelCase_ = 4
UpperCamelCase_ = 8
UpperCamelCase_ = 0.1
UpperCamelCase_ = 8
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 7
UpperCamelCase_ = 10
UpperCamelCase_ = 1E-8
UpperCamelCase_ = 1E5
def __A ( self : Dict ):
'''simple docstring'''
return asdict(self )
def A ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 34
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """deberta-v2"""
def __init__( self : Optional[Any] , UpperCamelCase__ : Any=12_8100 , UpperCamelCase__ : Optional[int]=1536 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : List[str]=24 , UpperCamelCase__ : Tuple=6144 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : List[Any]=1E-7 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=-1 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : str="gelu" , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = relative_attention
SCREAMING_SNAKE_CASE : Optional[Any] = max_relative_positions
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = position_biased_input
# Backwards compatibility
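# A legacy "|"-separated string such as "p2c|c2p" is normalized into a list of attention types.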
if type(UpperCamelCase__ ) == str:
SCREAMING_SNAKE_CASE : Optional[int] = [x.strip() for x in pos_att_type.lower().split('''|''' )]
SCREAMING_SNAKE_CASE : Any = pos_att_type
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''pooler_hidden_size''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pooler_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = pooler_hidden_act
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
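# ONNX dynamic axes: batch and sequence (plus a choice axis for multiple-choice); token_type_ids are exported only when the config uses them.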
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
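# Presumably the default ONNX opset version required to export this architecture.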
return 12
def __A ( self : Dict , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 34
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=UpperCamelCase__ , )
assert hasattr(self , '''env''' )
def __A ( self : str , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
SCREAMING_SNAKE_CASE : Any = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
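# run_ddp.py relies on native PyTorch DDP (no distribution dict); the other scripts enable SageMaker's SMDataParallel.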
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCamelCase__ , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version='''py36''' , )
def __A ( self : Optional[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __A ( self : Tuple , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.create_estimator(UpperCamelCase__ )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE : List[str] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCamelCase__ )
| 34
| 1
|
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
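# Hedged usage sketch (not part of the original script): with the standard
# `accelerate` CLI this file can be launched as, e.g.
#   accelerate launch cross_validation.py --num_folds 3 --mixed_precision fp16
# where `cross_validation.py` is an assumed name for this script; the two flags
# are the ones defined by the argument parser below.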
__UpperCamelCase : int = 16
__UpperCamelCase : str = 32
def A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = 16 ):
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE : Any = DatasetDict(
{
'''train''': dataset['''train'''].select(_lowercase ),
'''validation''': dataset['''train'''].select(_lowercase ),
'''test''': dataset['''validation'''],
} )
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE : Union[str, Any] = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE : Tuple = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE : Dict = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE : str = 8
else:
SCREAMING_SNAKE_CASE : Any = None
return tokenizer.pad(
_lowercase , padding='''longest''' , max_length=_lowercase , pad_to_multiple_of=_lowercase , return_tensors='''pt''' , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE : Optional[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
SCREAMING_SNAKE_CASE : int = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
SCREAMING_SNAKE_CASE : str = DataLoader(
tokenized_datasets['''test'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader, test_dataloader
def A ( _lowercase , _lowercase ):
# New Code #
SCREAMING_SNAKE_CASE : Union[str, Any] = []
# Download the dataset
SCREAMING_SNAKE_CASE : Dict = load_dataset('''glue''' , '''mrpc''' )
# Create our splits
SCREAMING_SNAKE_CASE : Union[str, Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
SCREAMING_SNAKE_CASE : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE : Optional[Any] = config['''lr''']
SCREAMING_SNAKE_CASE : Optional[int] = int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE : Dict = int(config['''seed'''] )
SCREAMING_SNAKE_CASE : int = int(config['''batch_size'''] )
SCREAMING_SNAKE_CASE : str = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE : Optional[int] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE : int = MAX_GPU_BATCH_SIZE
set_seed(_lowercase )
# New Code #
# Create our folds:
SCREAMING_SNAKE_CASE : Dict = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] )
SCREAMING_SNAKE_CASE : List[Any] = []
# Iterate over them
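# Each StratifiedKFold split yields (train_idxs, valid_idxs); per-fold test logits are collected for the ensemble below.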
for i, (train_idxs, valid_idxs) in enumerate(_lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = get_fold_dataloaders(
_lowercase , _lowercase , _lowercase , _lowercase , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE : Tuple = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE : Any = AdamW(params=model.parameters() , lr=_lowercase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE : int = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=100 , num_training_steps=(len(_lowercase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Now we train the model
for epoch in range(_lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**_lowercase )
SCREAMING_SNAKE_CASE : List[Any] = outputs.loss
SCREAMING_SNAKE_CASE : str = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**_lowercase )
SCREAMING_SNAKE_CASE : Tuple = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
SCREAMING_SNAKE_CASE : Dict = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , _lowercase )
# New Code #
# We also run predictions on the test set at the very end
SCREAMING_SNAKE_CASE : Dict = []
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**_lowercase )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(_lowercase , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
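# Soft-voting ensemble: stack the per-fold logits, average across folds, then argmax for the final predictions.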
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat(_lowercase , dim=0 )
SCREAMING_SNAKE_CASE : int = torch.stack(_lowercase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
SCREAMING_SNAKE_CASE : Optional[Any] = metric.compute(predictions=_lowercase , references=_lowercase )
accelerator.print('''Average test metrics from all folds:''' , _lowercase )
def A ( ):
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=_lowercase , default=_lowercase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
# New Code #
parser.add_argument('''--num_folds''' , type=_lowercase , default=3 , help='''The number of splits to perform across the dataset''' )
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
SCREAMING_SNAKE_CASE : Optional[int] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
| 34
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__UpperCamelCase : Dict = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__UpperCamelCase : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if ' ' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__UpperCamelCase : Optional[Any] = len(upper_files + space_files + hyphen_files + nodir_files)
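# Exit with a non-zero status equal to the number of offending files so CI fails loudly.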
if bad_files:
import sys
sys.exit(bad_files)
| 34
| 1
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
# TODO Update this
__UpperCamelCase : List[str] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """esm"""
def __init__( self : Tuple , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Any=768 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : str=12 , UpperCamelCase__ : Optional[int]=3072 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Union[str, Any]=1026 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = position_embedding_type
SCREAMING_SNAKE_CASE : Any = use_cache
SCREAMING_SNAKE_CASE : Dict = emb_layer_norm_before
SCREAMING_SNAKE_CASE : List[str] = token_dropout
SCREAMING_SNAKE_CASE : List[Any] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
SCREAMING_SNAKE_CASE : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = EsmFoldConfig(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
SCREAMING_SNAKE_CASE : Optional[int] = get_default_vocab_list()
else:
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_list
else:
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.esmfold_config.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = None
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = 0
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Optional[int] ):
'''simple docstring'''
if self.trunk is None:
SCREAMING_SNAKE_CASE : Optional[Any] = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = TrunkConfig(**self.trunk )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = asdict(self )
SCREAMING_SNAKE_CASE : Tuple = self.trunk.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 48
UpperCamelCase_ = 1_024
UpperCamelCase_ = 128
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = False
UpperCamelCase_ = 4
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Any ):
'''simple docstring'''
if self.structure_module is None:
SCREAMING_SNAKE_CASE : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
SCREAMING_SNAKE_CASE : Dict = self.sequence_state_dim // self.sequence_head_width
SCREAMING_SNAKE_CASE : Tuple = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'''
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'''
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = asdict(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.structure_module.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 384
UpperCamelCase_ = 128
UpperCamelCase_ = 16
UpperCamelCase_ = 128
UpperCamelCase_ = 12
UpperCamelCase_ = 4
UpperCamelCase_ = 8
UpperCamelCase_ = 0.1
UpperCamelCase_ = 8
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 7
UpperCamelCase_ = 10
UpperCamelCase_ = 1E-8
UpperCamelCase_ = 1E5
def __A ( self : Dict ):
'''simple docstring'''
return asdict(self )
def A ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 34
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Dict = None
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Union[str, Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = TaTokenizer
UpperCamelCase_ = []
def __init__( self : str , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]=100 , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : str , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE : List[str] = [f"""<extra_id_{i}>""" for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
SCREAMING_SNAKE_CASE : int = len(set(filter(lambda UpperCamelCase__ : bool('''extra_id_''' in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = vocab_file
SCREAMING_SNAKE_CASE : int = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE : str = extra_ids
@staticmethod
def __A ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
SCREAMING_SNAKE_CASE : List[str] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase__ , )
return max_model_length
def __A ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Any = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def __A ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
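# Standard T5 format: a single sequence becomes `X </s>`; a pair becomes `A </s> B </s>`.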
SCREAMING_SNAKE_CASE : Optional[int] = token_ids_a + [self.eos_token_id]
if token_ids_b is None:
return self.prefix_tokens + token_ids_a
else:
SCREAMING_SNAKE_CASE : Tuple = token_ids_b + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_b
def __A ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
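# T5 does not use token type ids, so the returned mask is all zeros regardless of the pair.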
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.eos_token_id]
if token_ids_b is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_b + eos ) * [0]
def __A ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(r'''<extra_id_\d+>''' , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self : List[Any] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
| 34
| 1
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=64 , UpperCamelCase__ : Optional[Any]=4_8000 , UpperCamelCase__ : Tuple=480 , UpperCamelCase__ : Union[str, Any]=10 , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : int=False , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 1_4000 , UpperCamelCase__ : int = None , UpperCamelCase__ : str = "fusion" , UpperCamelCase__ : str = "repeatpad" , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = top_db
SCREAMING_SNAKE_CASE : Union[str, Any] = truncation
SCREAMING_SNAKE_CASE : str = padding
SCREAMING_SNAKE_CASE : List[Any] = fft_window_size
SCREAMING_SNAKE_CASE : Tuple = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE : List[str] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = max_length_s
SCREAMING_SNAKE_CASE : Tuple = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE : List[Any] = sampling_rate
SCREAMING_SNAKE_CASE : List[str] = frequency_min
SCREAMING_SNAKE_CASE : Any = frequency_max
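# Two mel filter banks are built: an HTK-scaled one used on the "fusion" path and a Slaney-scaled one used everywhere else.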
SCREAMING_SNAKE_CASE : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm=UpperCamelCase__ , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm='''slaney''' , mel_scale='''slaney''' , )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __A ( self : Optional[int] , UpperCamelCase__ : np.array , UpperCamelCase__ : Optional[np.array] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = spectrogram(
UpperCamelCase__ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase__ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
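# Fusion: split the spectrogram frames into three ranges, sample one chunk from each, bilinearly shrink the full mel to chunk size, and stack the four views as channels.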
SCREAMING_SNAKE_CASE : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : Any = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE : List[Any] = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE : int = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE : Optional[int] = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE : Optional[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE : Tuple = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE : str = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase__ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __A ( self : Dict , UpperCamelCase__ : np.array , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ):
'''simple docstring'''
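# Long inputs: "rand_trunc" crops a random window, "fusion" builds the 4-channel stacked mel; short inputs are padded via "repeat", "repeatpad", or constant zeros.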
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ ) - max_length
SCREAMING_SNAKE_CASE : Dict = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE : List[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = False
else:
SCREAMING_SNAKE_CASE : str = self._random_mel_fusion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
SCREAMING_SNAKE_CASE : List[str] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE : Tuple = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Any = np.stack(np.tile(UpperCamelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE : List[Any] = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.stack(np.tile(UpperCamelCase__ , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.pad(UpperCamelCase__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE : List[Any] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE : List[str] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : str = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE : List[str] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : List[str] = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Any = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ):
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(UpperCamelCase__ , dtype=np.floataa )
elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(UpperCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE : int = [
self._get_input_mel(UpperCamelCase__ , max_length if max_length else self.nb_max_samples , UpperCamelCase__ , UpperCamelCase__ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase__ )
is_longer.append(UpperCamelCase__ )
if truncation == "fusion" and sum(UpperCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = True
if isinstance(input_mel[0] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE : Optional[Any] = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE : int = BatchFeature(UpperCamelCase__ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : int = input_features.convert_to_tensors(UpperCamelCase__ )
return input_features
| 34
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : str = False
class lowercase__ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = VersatileDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe.dual_guided(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe.text_to_image(
prompt=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.image_variation(UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
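# Note on the save/load round-trip test above: re-seeding the generator with 0
# before the second `dual_guided` call makes both runs draw identical latents,
# so any difference between `image` and `new_image` can only come from the
# reloaded weights, which is why the summed absolute difference must stay below 1e-5.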
| 34
| 1
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowercase__ :
UpperCamelCase_ = 42 # [batch_size x 3]
UpperCamelCase_ = 42 # [batch_size x 3]
UpperCamelCase_ = 42 # [batch_size x 3]
UpperCamelCase_ = 42 # [batch_size x 3]
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = 42
def __A ( self : List[Any] ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __A ( self : str ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def __A ( self : List[Any] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = torch.arange(self.height * self.width )
SCREAMING_SNAKE_CASE : List[str] = torch.stack(
[
pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def __A ( self : List[Any] ):
'''simple docstring'''
        SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.shape
        SCREAMING_SNAKE_CASE : List[str] = int(np.prod(inner_shape ) )
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_coords()
        SCREAMING_SNAKE_CASE : List[str] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        SCREAMING_SNAKE_CASE : List[str] = self.get_camera_rays(coords )
        SCREAMING_SNAKE_CASE : str = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __A ( self : Dict , UpperCamelCase__ : torch.Tensor ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
        SCREAMING_SNAKE_CASE : Union[str, Any] = coords.view(batch_size , -1 , 2 )
        SCREAMING_SNAKE_CASE : Any = self.resolution()
        SCREAMING_SNAKE_CASE : str = self.fov()
        SCREAMING_SNAKE_CASE : str = (flat.float() / (res - 1)) * 2 - 1
        SCREAMING_SNAKE_CASE : List[str] = fracs * torch.tan(fov / 2 )
        SCREAMING_SNAKE_CASE : int = fracs.view(batch_size , -1 , 2 )
        SCREAMING_SNAKE_CASE : Optional[Any] = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        SCREAMING_SNAKE_CASE : Tuple = directions / directions.norm(dim=-1 , keepdim=True )
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )
def __A ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=UpperCamelCase__ , height=UpperCamelCase__ , x_fov=self.x_fov , y_fov=self.y_fov , )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = []
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Tuple = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
        SCREAMING_SNAKE_CASE : int = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        SCREAMING_SNAKE_CASE : Tuple = -z * 4
        SCREAMING_SNAKE_CASE : Optional[int] = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        SCREAMING_SNAKE_CASE : Tuple = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
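# A self-contained sketch (demo names, not part of the original module) of the
# pinhole ray construction used in `get_camera_rays` above: pixel coordinates
# are mapped to [-1, 1], scaled by tan(fov / 2), and combined with the camera
# basis vectors to form a normalized world-space ray direction. It reuses the
# `numpy`/`torch` imports at the top of this file.
def _demo_pixel_to_ray_direction(px: int, py: int, width: int, height: int, fov: float) -> torch.Tensor:
    # identity camera basis: x right, y down, z forward (an assumption for the demo)
    x_axis = torch.tensor([1.0, 0.0, 0.0])
    y_axis = torch.tensor([0.0, 1.0, 0.0])
    z_axis = torch.tensor([0.0, 0.0, 1.0])
    fx = (px / (width - 1)) * 2 - 1
    fy = (py / (height - 1)) * 2 - 1
    scale = float(np.tan(fov / 2))
    direction = z_axis + x_axis * fx * scale + y_axis * fy * scale
    return direction / direction.norm()

# a pixel near the image center looks almost straight down the optical axis:
# _demo_pixel_to_ray_direction(31, 31, 64, 64, 0.7) is approximately tensor([0., 0., 1.])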
| 34
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def A ( _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = analyze_text(_lowercase )
SCREAMING_SNAKE_CASE : Any = list(''' ''' + ascii_lowercase )
    # total number of characters, used to normalize counts into probabilities.
    SCREAMING_SNAKE_CASE : Tuple = sum(single_char_strings.values() )
    # first-order (single-character) entropy
    SCREAMING_SNAKE_CASE : Tuple = 0
    # for each character of the alphabet, add its entropy contribution if it occurs
for ch in my_alphas:
if ch in single_char_strings:
SCREAMING_SNAKE_CASE : Tuple = single_char_strings[ch]
SCREAMING_SNAKE_CASE : List[str] = my_str / all_sum
            my_fir_sum += prob * math.loga(prob )  # entropy formula: p * log2(p)
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
    # second-order (two-character) entropy
    SCREAMING_SNAKE_CASE : Optional[Any] = sum(two_char_strings.values() )
    SCREAMING_SNAKE_CASE : List[str] = 0
    # for each ordered pair of alphabet characters, add its entropy contribution.
    for cha in my_alphas:
        for chb in my_alphas:
            SCREAMING_SNAKE_CASE : Union[str, Any] = cha + chb
if sequence in two_char_strings:
SCREAMING_SNAKE_CASE : Any = two_char_strings[sequence]
                SCREAMING_SNAKE_CASE : Dict = int(my_str ) / all_sum
                my_sec_sum += prob * math.loga(prob )
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = Counter() # type: ignore
SCREAMING_SNAKE_CASE : Any = Counter() # type: ignore
    single_char_strings[text[-1]] += 1  # the loop below stops one short, so count the final character here
    # special case: count the bigram formed by a leading space and the first character.
    two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
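# Quick illustration of the counting above (worked by hand): for the text
# "abb", `analyze_text` returns single-character counts Counter({'b': 2, 'a': 1})
# and two-character counts Counter({' a': 1, 'ab': 1, 'bb': 1}); note the
# synthetic leading-space bigram added before the loop runs.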
def A ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 34
| 1
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
        help='The number of input channels. If `None`, the number of input channels will be inferred automatically.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def A ( _lowercase ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
__UpperCamelCase : Tuple = parser.parse_args()
__UpperCamelCase : Tuple = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
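# Example invocation (illustrative; the script filename and file paths are
# placeholders, only the flags come from the parser above):
#
#   python convert_original_controlnet_to_diffusers.py \
#     --checkpoint_path ./control_sd15_canny.pth \
#     --original_config_file ./cldm_v15.yaml \
#     --dump_path ./controlnet-canny \
#     --to_safetensors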
| 34
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Tuple = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
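# Sketch of the lazy-import pattern wired up above: `_LazyModule` stands in for
# this module and resolves the names listed in `_import_structure` on first
# attribute access, so a line such as
#
#   from transformers import CTRLConfig
#
# only triggers the import of the `configuration_ctrl` submodule when
# `CTRLConfig` is actually looked up, keeping optional torch/TF backends
# out of the import path until they are needed.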
| 34
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
def A ( _lowercase ):
# initialize config
if "resnet-50" in model_name:
SCREAMING_SNAKE_CASE : str = ResNetConfig.from_pretrained('''microsoft/resnet-50''' )
elif "resnet-101" in model_name:
SCREAMING_SNAKE_CASE : str = ResNetConfig.from_pretrained('''microsoft/resnet-101''' )
else:
        raise ValueError('''Model name should include either resnet-50 or resnet-101''' )
SCREAMING_SNAKE_CASE : int = DetrConfig(use_timm_backbone=_lowercase , backbone_config=_lowercase )
# set label attributes
SCREAMING_SNAKE_CASE : Dict = '''panoptic''' in model_name
if is_panoptic:
SCREAMING_SNAKE_CASE : str = 250
else:
SCREAMING_SNAKE_CASE : Any = 91
SCREAMING_SNAKE_CASE : Optional[int] = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Dict = '''coco-detection-id2label.json'''
SCREAMING_SNAKE_CASE : Tuple = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
    SCREAMING_SNAKE_CASE : int = {int(k ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = idalabel
SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def A ( _lowercase ):
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE : List[Any] = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
f"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
f"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : int = state_dict.pop(_lowercase )
SCREAMING_SNAKE_CASE : List[str] = val
def A ( _lowercase , _lowercase=False ):
SCREAMING_SNAKE_CASE : Tuple = ''''''
if is_panoptic:
SCREAMING_SNAKE_CASE : List[Any] = '''detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : int = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : Any = in_proj_weight[:256, :]
SCREAMING_SNAKE_CASE : int = in_proj_bias[:256]
SCREAMING_SNAKE_CASE : Any = in_proj_weight[256:512, :]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_bias[256:512]
SCREAMING_SNAKE_CASE : int = in_proj_weight[-256:, :]
SCREAMING_SNAKE_CASE : int = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
SCREAMING_SNAKE_CASE : str = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight[:256, :]
SCREAMING_SNAKE_CASE : Tuple = in_proj_bias[:256]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[256:512, :]
SCREAMING_SNAKE_CASE : int = in_proj_bias[256:512]
SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[-256:, :]
SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
SCREAMING_SNAKE_CASE : Dict = state_dict.pop(
f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
SCREAMING_SNAKE_CASE : Any = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight_cross_attn[:256, :]
SCREAMING_SNAKE_CASE : int = in_proj_bias_cross_attn[:256]
SCREAMING_SNAKE_CASE : Tuple = in_proj_weight_cross_attn[256:512, :]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias_cross_attn[256:512]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight_cross_attn[-256:, :]
SCREAMING_SNAKE_CASE : Any = in_proj_bias_cross_attn[-256:]
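# Why the 256-row slices above work: torch.nn.MultiheadAttention stores a fused
# `in_proj_weight` of shape (3 * d_model, d_model) with the query, key and value
# projections stacked along dim 0, and DETR uses d_model = 256. A minimal
# self-contained check of that layout (demo function, not part of the script):
def _demo_qkv_split(d_model: int = 256) -> None:
    fused = torch.randn(3 * d_model, d_model)
    q_w = fused[:d_model]
    k_w = fused[d_model : 2 * d_model]
    v_w = fused[-d_model:]
    assert torch.equal(torch.cat([q_w, k_w, v_w]), fused)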
def A ( ):
SCREAMING_SNAKE_CASE : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : Dict = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def A ( _lowercase , _lowercase=None , _lowercase=False ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = get_detr_config(_lowercase )
# load original model from torch hub
SCREAMING_SNAKE_CASE : Optional[int] = {
'''detr-resnet-50''': '''detr_resnet50''',
'''detr-resnet-101''': '''detr_resnet101''',
}
logger.info(f"""Converting model {model_name}...""" )
SCREAMING_SNAKE_CASE : str = torch.hub.load('''facebookresearch/detr''' , model_name_to_original_name[model_name] , pretrained=_lowercase ).eval()
SCREAMING_SNAKE_CASE : int = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(_lowercase ):
if is_panoptic:
SCREAMING_SNAKE_CASE : List[str] = '''detr.''' + src
rename_key(_lowercase , _lowercase , _lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowercase , is_panoptic=_lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE : Optional[Any] = '''detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
SCREAMING_SNAKE_CASE : str = state_dict.pop(_lowercase )
SCREAMING_SNAKE_CASE : Any = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(_lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(_lowercase )
SCREAMING_SNAKE_CASE : List[str] = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE : List[str] = DetrForSegmentation(_lowercase ) if is_panoptic else DetrForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
# verify our conversion on an image
SCREAMING_SNAKE_CASE : List[Any] = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
SCREAMING_SNAKE_CASE : List[str] = DetrImageProcessor(format=_lowercase )
SCREAMING_SNAKE_CASE : List[str] = processor(images=prepare_img() , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Optional[int] = encoding['''pixel_values''']
SCREAMING_SNAKE_CASE : List[str] = detr(_lowercase )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowercase )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('''Uploading PyTorch model and image processor to the hub...''' )
model.push_to_hub(f"""nielsr/{model_name}""" )
processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
__UpperCamelCase : List[str] = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
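# Example invocation (illustrative; the script filename is a placeholder, the
# flags come from the parser above):
#
#   python convert_detr_to_pytorch.py \
#     --model_name detr-resnet-50 \
#     --pytorch_dump_folder_path ./detr-resnet-50 \
#     --push_to_hub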
| 34
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : Tuple = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['MaskFormerFeatureExtractor']
__UpperCamelCase : List[Any] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__UpperCamelCase : Union[str, Any] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 34
| 1
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""vqvae"""]
def __init__( self : List[Any] , UpperCamelCase__ : AutoencoderKL , UpperCamelCase__ : UNetaDConditionModel , UpperCamelCase__ : Mel , UpperCamelCase__ : Union[DDIMScheduler, DDPMScheduler] , ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , mel=UpperCamelCase__ , vqvae=UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
        return 50 if isinstance(self.scheduler , DDIMScheduler ) else 1000
@torch.no_grad()
def __call__( self : str , UpperCamelCase__ : int = 1 , UpperCamelCase__ : str = None , UpperCamelCase__ : np.ndarray = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : int = 0 , UpperCamelCase__ : int = None , UpperCamelCase__ : torch.Generator = None , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 0 , UpperCamelCase__ : torch.Generator = None , UpperCamelCase__ : float = 0 , UpperCamelCase__ : torch.Tensor = None , UpperCamelCase__ : torch.Tensor = None , UpperCamelCase__ : Union[str, Any]=True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE : int = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE : List[str] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCamelCase__ , device=self.device , )
SCREAMING_SNAKE_CASE : Optional[Any] = noise
SCREAMING_SNAKE_CASE : List[str] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = self.mel.audio_slice_to_image(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE : Dict = (input_image / 255) * 2 - 1
SCREAMING_SNAKE_CASE : Dict = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(UpperCamelCase__ , 0 ) ).latent_dist.sample(
generator=UpperCamelCase__ )[0]
SCREAMING_SNAKE_CASE : Any = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler.add_noise(UpperCamelCase__ , UpperCamelCase__ , self.scheduler.timesteps[start_step - 1] )
SCREAMING_SNAKE_CASE : Tuple = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE : List[Any] = int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE : Any = int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE : Dict = self.scheduler.add_noise(UpperCamelCase__ , UpperCamelCase__ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = self.unet(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )['''sample''']
else:
SCREAMING_SNAKE_CASE : int = self.unet(UpperCamelCase__ , UpperCamelCase__ )['''sample''']
if isinstance(self.scheduler , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : str = self.scheduler.step(
model_output=UpperCamelCase__ , timestep=UpperCamelCase__ , sample=UpperCamelCase__ , eta=UpperCamelCase__ , generator=UpperCamelCase__ , )['''prev_sample''']
else:
SCREAMING_SNAKE_CASE : str = self.scheduler.step(
model_output=UpperCamelCase__ , timestep=UpperCamelCase__ , sample=UpperCamelCase__ , generator=UpperCamelCase__ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE : Dict = mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE : Optional[int] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 is the scaling factor used during training to ensure the latents have unit variance
SCREAMING_SNAKE_CASE : List[str] = 1 / self.vqvae.config.scaling_factor * images
SCREAMING_SNAKE_CASE : Any = self.vqvae.decode(UpperCamelCase__ )['''sample''']
SCREAMING_SNAKE_CASE : str = (images / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Tuple = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
SCREAMING_SNAKE_CASE : List[str] = (images * 255).round().astype('''uint8''' )
        SCREAMING_SNAKE_CASE : Optional[Any] = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
SCREAMING_SNAKE_CASE : Any = [self.mel.image_to_audio(UpperCamelCase__ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCamelCase__ )[:, np.newaxis, :] ) , **ImagePipelineOutput(UpperCamelCase__ ) )
@torch.no_grad()
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Image.Image] , UpperCamelCase__ : int = 50 ):
'''simple docstring'''
        assert isinstance(self.scheduler , DDIMScheduler )
self.scheduler.set_timesteps(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
SCREAMING_SNAKE_CASE : Any = (sample / 255) * 2 - 1
SCREAMING_SNAKE_CASE : str = torch.Tensor(UpperCamelCase__ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
SCREAMING_SNAKE_CASE : Optional[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
SCREAMING_SNAKE_CASE : Tuple = self.scheduler.alphas_cumprod[t]
SCREAMING_SNAKE_CASE : List[str] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE : Dict = 1 - alpha_prod_t
SCREAMING_SNAKE_CASE : Any = self.unet(UpperCamelCase__ , UpperCamelCase__ )['''sample''']
SCREAMING_SNAKE_CASE : Any = (1 - alpha_prod_t_prev) ** 0.5 * model_output
SCREAMING_SNAKE_CASE : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
SCREAMING_SNAKE_CASE : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __A ( UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : float ):
'''simple docstring'''
        SCREAMING_SNAKE_CASE : int = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
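# Spherical linear interpolation, as implemented by the static method above:
# it traces the great-circle arc between two flattened noise tensors, returning
# the first tensor at alpha = 0 and the second at alpha = 1. A standalone
# restatement of the same formula for reference (demo name; reuses the
# `acos`/`sin`/`torch` imports at the top of this file):
def _demo_slerp(xa: torch.Tensor, xb: torch.Tensor, alpha: float) -> torch.Tensor:
    theta = acos(torch.dot(torch.flatten(xa), torch.flatten(xb)) / torch.norm(xa) / torch.norm(xb))
    return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)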
| 34
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCamelCase : Dict = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = True
while ask_again:
SCREAMING_SNAKE_CASE : Optional[Any] = input(_lowercase )
try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_lowercase )
def A ( _lowercase , _lowercase=[] , _lowercase=None , _lowercase=0 ):
SCREAMING_SNAKE_CASE : Dict = BulletMenu(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : str = menu.run(default_choice=_lowercase )
    return convert_value(result ) if convert_value is not None else result
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = int(_lowercase )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def A ( _lowercase ):
return {"yes": True, "no": False}[value.lower()]
class lowercase__ ( argparse.RawDescriptionHelpFormatter):
def __A ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = super()._format_usage(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
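# Illustrative behaviour of the converters above (the indices are menu
# positions; all values shown are hypothetical user choices): each masked
# converter maps the integer picked in a BulletMenu onto the matching enum,
# e.g. "0" becomes ComputeEnvironment.LOCAL_MACHINE and "1" becomes
# ComputeEnvironment.AMAZON_SAGEMAKER, while the yes/no converter maps "yes"
# to True and "no" to False.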
| 34
| 1
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__UpperCamelCase : Dict = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__UpperCamelCase : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if ' ' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__UpperCamelCase : Optional[Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
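# Example (illustrative): a file named "Sorts/Bubble Sort.py" would be reported
# twice above, once under the uppercase check and once under the space check,
# and the script would then exit with a status equal to the total offence count.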
| 34
|
from __future__ import annotations
from typing import Any
class lowercase__ ( UpperCamelCase_):
pass
class lowercase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = data
SCREAMING_SNAKE_CASE : Node | None = None
def __iter__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self
SCREAMING_SNAKE_CASE : Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase__ )
yield node.data
SCREAMING_SNAKE_CASE : Dict = node.next_node
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
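# Constant-memory alternative to the `has_loop` property above: the property
# appends every visited node to a list, which costs O(n) extra memory and
# quadratic time because each step scans that list. Floyd's tortoise-and-hare
# detects the same cycles with two pointers in O(1) memory; a sketch against
# the same node class (the annotation uses the intended `Node` name, quoted
# because the class name is masked in this dump):
def has_loop_floyd(head: "Node") -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # advance one step
        fast = fast.next_node.next_node  # advance two steps
        if slow is fast:                 # the pointers can only meet inside a cycle
            return True
    return False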
if __name__ == "__main__":
__UpperCamelCase : List[Any] = Node(1)
__UpperCamelCase : str = Node(2)
__UpperCamelCase : Dict = Node(3)
__UpperCamelCase : List[Any] = Node(4)
print(root_node.has_loop) # False
__UpperCamelCase : int = root_node.next_node
print(root_node.has_loop) # True
__UpperCamelCase : Union[str, Any] = Node(5)
__UpperCamelCase : Union[str, Any] = Node(6)
__UpperCamelCase : List[Any] = Node(5)
__UpperCamelCase : List[str] = Node(6)
print(root_node.has_loop) # False
__UpperCamelCase : List[Any] = Node(1)
print(root_node.has_loop) # False
| 34
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True})
UpperCamelCase_ = Features({"""image""": Image()})
UpperCamelCase_ = Features({"""labels""": ClassLabel})
UpperCamelCase_ = "image"
UpperCamelCase_ = "labels"
def __A ( self : str , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , UpperCamelCase__ ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self )
SCREAMING_SNAKE_CASE : Any = self.label_schema.copy()
SCREAMING_SNAKE_CASE : Optional[Any] = features[self.label_column]
SCREAMING_SNAKE_CASE : Union[str, Any] = label_schema
return task_template
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
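# Illustrative alignment (hypothetical dataset features): calling the masked
# alignment method above with
# Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# copies the dataset's concrete ClassLabel into a deep copy of the template, so
# downstream code sees the real class names instead of the bare ClassLabel
# placeholder; a missing label column or a non-ClassLabel column raises ValueError.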
| 34
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=64 , UpperCamelCase__ : Optional[Any]=4_8000 , UpperCamelCase__ : Tuple=480 , UpperCamelCase__ : Union[str, Any]=10 , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : int=False , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 1_4000 , UpperCamelCase__ : int = None , UpperCamelCase__ : str = "fusion" , UpperCamelCase__ : str = "repeatpad" , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = top_db
SCREAMING_SNAKE_CASE : Union[str, Any] = truncation
SCREAMING_SNAKE_CASE : str = padding
SCREAMING_SNAKE_CASE : List[Any] = fft_window_size
SCREAMING_SNAKE_CASE : Tuple = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE : List[str] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = max_length_s
SCREAMING_SNAKE_CASE : Tuple = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE : List[Any] = sampling_rate
SCREAMING_SNAKE_CASE : List[str] = frequency_min
SCREAMING_SNAKE_CASE : Any = frequency_max
SCREAMING_SNAKE_CASE : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm=UpperCamelCase__ , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm='''slaney''' , mel_scale='''slaney''' , )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __A ( self : Optional[int] , UpperCamelCase__ : np.array , UpperCamelCase__ : Optional[np.array] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = spectrogram(
UpperCamelCase__ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase__ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : Any = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE : List[Any] = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE : int = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE : Optional[int] = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE : Optional[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE : Tuple = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE : str = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase__ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __A ( self : Dict , UpperCamelCase__ : np.array , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ ) - max_length
SCREAMING_SNAKE_CASE : Dict = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
                SCREAMING_SNAKE_CASE : List[str] = max_length // self.hop_length + 1  # the +1 accounts for the extra frame produced by the spectrogram computation
SCREAMING_SNAKE_CASE : List[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = False
else:
SCREAMING_SNAKE_CASE : str = self._random_mel_fusion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
SCREAMING_SNAKE_CASE : List[str] = False
        # "repeat" and "repeatpad" are the extra padding modes: the audio is tiled before the usual max_length padding is applied.
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE : Tuple = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Any = np.stack(np.tile(UpperCamelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE : List[Any] = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.stack(np.tile(UpperCamelCase__ , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.pad(UpperCamelCase__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE : List[Any] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE : List[str] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : str = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE : List[str] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : List[str] = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Any = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ):
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(UpperCamelCase__ , dtype=np.floataa )
elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(UpperCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE : int = [
self._get_input_mel(UpperCamelCase__ , max_length if max_length else self.nb_max_samples , UpperCamelCase__ , UpperCamelCase__ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase__ )
is_longer.append(UpperCamelCase__ )
if truncation == "fusion" and sum(UpperCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = True
if isinstance(input_mel[0] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE : Optional[Any] = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE : int = BatchFeature(UpperCamelCase__ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : int = input_features.convert_to_tensors(UpperCamelCase__ )
return input_features
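# Minimal usage sketch (shown as comments because the class name is masked in
# this dump; the shape is what the defaults above should produce, stated as an
# assumption): a 10-second mono waveform at 48 kHz yields, under
# truncation="fusion", four stacked views of the 64-bin log-mel spectrogram
# per example.
#
#   feature_extractor = ClapFeatureExtractor()          # hypothetical name
#   waveform = np.zeros(48_000 * 10, dtype=np.float32)
#   inputs = feature_extractor(waveform, sampling_rate=48_000, return_tensors="np")
#   inputs["input_features"].shape                      # (1, 4, 1001, 64) under the defaults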
| 34
| 1
|
def A ( _lowercase = 100 ):
SCREAMING_SNAKE_CASE : Optional[int] = set()
SCREAMING_SNAKE_CASE : List[Any] = 0
    SCREAMING_SNAKE_CASE : str = n + 1  # maximum limit (exclusive upper bound for the bases and exponents)
    for a in range(2 , n + 1 ):
        for b in range(2 , n + 1 ):
SCREAMING_SNAKE_CASE : Tuple = a**b # calculates the current power
collect_powers.add(_lowercase ) # adds the result to the set
return len(_lowercase )
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
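# Worked example (checkable by hand): for n = 5 the loops generate the
# 4 * 4 = 16 powers a**b with 2 <= a, b <= 5, and only 16 = 2**4 = 4**2 occurs
# twice, so solution(5) == 15; the set makes each deduplication O(1).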
| 34
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """layoutlmv3"""
def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any]=5_0265 , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Tuple=3072 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=512 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[Any]=1E-5 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : int=0 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[str]=1024 , UpperCamelCase__ : str=128 , UpperCamelCase__ : str=128 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Optional[Any]=64 , UpperCamelCase__ : Dict=256 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Dict=224 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
vocab_size=UpperCamelCase__ , hidden_size=UpperCamelCase__ , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , intermediate_size=UpperCamelCase__ , hidden_act=UpperCamelCase__ , hidden_dropout_prob=UpperCamelCase__ , attention_probs_dropout_prob=UpperCamelCase__ , max_position_embeddings=UpperCamelCase__ , type_vocab_size=UpperCamelCase__ , initializer_range=UpperCamelCase__ , layer_norm_eps=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : List[str] = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : str = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Union[str, Any] = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Union[str, Any] = text_embed
SCREAMING_SNAKE_CASE : List[str] = visual_embed
SCREAMING_SNAKE_CASE : Optional[Any] = input_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : List[Any] = classifier_dropout
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = version.parse("""1.12""")
@property
def __A ( self : str ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __A ( self : int ):
'''simple docstring'''
return 1E-5
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Optional[Any] , UpperCamelCase__ : "ProcessorMixin" , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , UpperCamelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : List[Any] = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : Any = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = dict(
processor(
UpperCamelCase__ , text=UpperCamelCase__ , boxes=UpperCamelCase__ , return_tensors=UpperCamelCase__ , ) )
return inputs
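# Sketch of the dynamic-axis handling used above: a batch or sequence dimension
# of -1 is replaced with a small fixed size before building placeholder inputs,
# minus any special tokens the tokenizer will add. This mirrors the intended
# behavior of compute_effective_axis_dimension as an assumption, not its exact
# source.
def effective_dim(dimension, fixed_dimension, num_token_to_add=0):
    if dimension <= 0:  # -1 marks a dynamic axis
        dimension = fixed_dimension
    return dimension - num_token_to_add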
| 34
| 1
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Any = 0
if start < end:
SCREAMING_SNAKE_CASE : Any = randint(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = a[end]
SCREAMING_SNAKE_CASE : Union[str, Any] = a[pivot]
SCREAMING_SNAKE_CASE : List[Any] = temp
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = _in_place_partition(_lowercase , _lowercase , _lowercase )
count += _in_place_quick_sort(_lowercase , _lowercase , p - 1 )
count += _in_place_quick_sort(_lowercase , p + 1 , _lowercase )
return count
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Any = randint(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : int = a[end]
SCREAMING_SNAKE_CASE : List[Any] = a[pivot]
SCREAMING_SNAKE_CASE : Tuple = temp
SCREAMING_SNAKE_CASE : List[str] = start - 1
for index in range(_lowercase , _lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
SCREAMING_SNAKE_CASE : Dict = new_pivot_index + 1
SCREAMING_SNAKE_CASE : List[Any] = a[new_pivot_index]
SCREAMING_SNAKE_CASE : Union[str, Any] = a[index]
SCREAMING_SNAKE_CASE : str = temp
SCREAMING_SNAKE_CASE : Any = a[new_pivot_index + 1]
SCREAMING_SNAKE_CASE : List[Any] = a[end]
SCREAMING_SNAKE_CASE : List[str] = temp
return new_pivot_index + 1, count
__UpperCamelCase : Dict = TemporaryFile()
__UpperCamelCase : List[str] = 100 # 100 elements are to be sorted
__UpperCamelCase , __UpperCamelCase : List[str] = 0, 1 # mean and standard deviation
__UpperCamelCase : str = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
__UpperCamelCase : List[str] = np.load(outfile)
__UpperCamelCase : str = len(M) - 1
__UpperCamelCase : List[str] = _in_place_quick_sort(M, 0, r)
print(
'No of Comparisons for 100 elements selected from a standard normal distribution '
'is :'
)
print(z)
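# A plain-named sketch of the randomized Lomuto partition with comparison
# counting implemented above (illustrative only, mirroring the logic without
# the dataset's renaming):
from random import randint

def lomuto_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    a[pivot], a[end] = a[end], a[pivot]  # move the pivot value to the end
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1  # one comparison per element scanned
        if a[index] < a[end]:
            new_pivot_index += 1
            a[new_pivot_index], a[index] = a[index], a[new_pivot_index]
    a[new_pivot_index + 1], a[end] = a[end], a[new_pivot_index + 1]
    return new_pivot_index + 1, count

def quick_sort_count(a, start, end):
    count = 0
    if start < end:
        p, c = lomuto_partition(a, start, end)
        count += c + quick_sort_count(a, start, p - 1) + quick_sort_count(a, p + 1, end)
    return count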
| 34
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = FunnelTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[Any] = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE : int = '''unwanted, running'''
return input_text, output_text
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
SCREAMING_SNAKE_CASE : int = tokenizer('''UNwant\u00E9d,running''' )
SCREAMING_SNAKE_CASE : Optional[Any] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
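# The convention verified above, as a standalone sketch (illustrative): Funnel
# assigns token_type_id 2 to the leading [CLS], 0 to the first sequence, and 1
# to the second sequence.
def funnel_token_type_ids(len_a, len_b=0):
    return [2] + [0] * len_a + [1] * len_b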
| 34
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """layoutlmv3"""
def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any]=5_0265 , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Tuple=3072 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=512 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[Any]=1E-5 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : int=0 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[str]=1024 , UpperCamelCase__ : str=128 , UpperCamelCase__ : str=128 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Optional[Any]=64 , UpperCamelCase__ : Dict=256 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Dict=224 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
vocab_size=UpperCamelCase__ , hidden_size=UpperCamelCase__ , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , intermediate_size=UpperCamelCase__ , hidden_act=UpperCamelCase__ , hidden_dropout_prob=UpperCamelCase__ , attention_probs_dropout_prob=UpperCamelCase__ , max_position_embeddings=UpperCamelCase__ , type_vocab_size=UpperCamelCase__ , initializer_range=UpperCamelCase__ , layer_norm_eps=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : List[str] = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : str = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Union[str, Any] = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Union[str, Any] = text_embed
SCREAMING_SNAKE_CASE : List[str] = visual_embed
SCREAMING_SNAKE_CASE : Optional[Any] = input_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : List[Any] = classifier_dropout
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = version.parse("""1.12""")
@property
def __A ( self : str ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __A ( self : int ):
'''simple docstring'''
return 1E-5
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Optional[Any] , UpperCamelCase__ : "ProcessorMixin" , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , UpperCamelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : List[Any] = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : Any = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = dict(
processor(
UpperCamelCase__ , text=UpperCamelCase__ , boxes=UpperCamelCase__ , return_tensors=UpperCamelCase__ , ) )
return inputs
| 34
|
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset
SCREAMING_SNAKE_CASE : Optional[Any] = process
SCREAMING_SNAKE_CASE : Union[str, Any] = params
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : List[str] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.dataset[i]
SCREAMING_SNAKE_CASE : Optional[int] = self.process(UpperCamelCase__ , **self.params )
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = loader
SCREAMING_SNAKE_CASE : List[Any] = infer
SCREAMING_SNAKE_CASE : int = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = loader_batch_size
# Internal bookkeeping
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = None
def __len__( self : int ):
'''simple docstring'''
return len(self.loader )
def __iter__( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
SCREAMING_SNAKE_CASE : Optional[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Convert ModelOutput to tuple first
SCREAMING_SNAKE_CASE : Dict = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
SCREAMING_SNAKE_CASE : int = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
SCREAMING_SNAKE_CASE : Tuple = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# like batch_size=1
SCREAMING_SNAKE_CASE : Any = self._loader_batch_data.__class__(UpperCamelCase__ )
self._loader_batch_index += 1
return result
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
SCREAMING_SNAKE_CASE : Tuple = next(self.iterator )
SCREAMING_SNAKE_CASE : List[Any] = self.infer(UpperCamelCase__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Optional[int] = processed
else:
SCREAMING_SNAKE_CASE : int = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : int = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[Any] = observed_batch_size
# Setting internal index to unwrap the batch
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : int = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = iter(self.loader )
SCREAMING_SNAKE_CASE : List[Any] = None
return self
def __A ( self : List[str] ):
'''simple docstring'''
if self.subiterator is None:
SCREAMING_SNAKE_CASE : Dict = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
SCREAMING_SNAKE_CASE : Any = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item.
# ChunkIterator will keep feeding until ALL elements of the iterator
# have created their subiterator and have been iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
SCREAMING_SNAKE_CASE : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
SCREAMING_SNAKE_CASE : Union[str, Any] = next(self.subiterator )
return processed
class lowercase__ ( UpperCamelCase_):
def __iter__( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Tuple = self.loader_batch_item()
SCREAMING_SNAKE_CASE : Any = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
while not is_last:
SCREAMING_SNAKE_CASE : Any = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Tuple = processed
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : List[str] = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[str] = observed_batch_size
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : str = 0
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Any = self.loader_batch_item()
SCREAMING_SNAKE_CASE : List[Any] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
else:
SCREAMING_SNAKE_CASE : int = processed
SCREAMING_SNAKE_CASE : List[str] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
return accumulator
class lowercase__ ( UpperCamelCase_):
def __init__( self : Optional[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = dataset
SCREAMING_SNAKE_CASE : Dict = key
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Dict , UpperCamelCase__ : Tuple ):
'''simple docstring'''
return self.dataset[i][self.key]
class lowercase__ ( UpperCamelCase_):
def __init__( self : List[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataset
SCREAMING_SNAKE_CASE : List[str] = keya
SCREAMING_SNAKE_CASE : Tuple = keya
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 34
| 1
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCamelCase : Any = 'pt'
elif is_tf_available():
__UpperCamelCase : str = 'tf'
else:
__UpperCamelCase : Optional[int] = 'jax'
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = PerceiverTokenizer
UpperCamelCase_ = False
def __A ( self : List[Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Any = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __A ( self : Optional[Any] ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def __A ( self : Optional[Any] , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Tuple=20 , UpperCamelCase__ : Dict=5 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = []
for i in range(len(UpperCamelCase__ ) ):
try:
SCREAMING_SNAKE_CASE : Dict = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
SCREAMING_SNAKE_CASE : Dict = list(filter(lambda UpperCamelCase__ : re.match(r'''^[ a-zA-Z]+$''' , t[1] ) , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : List[str] = list(filter(lambda UpperCamelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase__ ) , UpperCamelCase__ ) )
if max_length is not None and len(UpperCamelCase__ ) > max_length:
SCREAMING_SNAKE_CASE : Any = toks[:max_length]
if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0:
while len(UpperCamelCase__ ) < min_length:
SCREAMING_SNAKE_CASE : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE : List[Any] = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
if " " not in output_txt and len(UpperCamelCase__ ) > 1:
SCREAMING_SNAKE_CASE : Tuple = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE : Tuple = ''' ''' + output_txt
SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
return output_txt, output_ids
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE : Optional[Any] = '''Unicode €.'''
SCREAMING_SNAKE_CASE : int = tokenizer(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['''input_ids'''] , UpperCamelCase__ )
# decoding
SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , '''[CLS]Unicode €.[SEP]''' )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer('''e è é ê ë''' )
SCREAMING_SNAKE_CASE : Any = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['''input_ids'''] , UpperCamelCase__ )
# decoding
SCREAMING_SNAKE_CASE : Any = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE : List[str] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
SCREAMING_SNAKE_CASE : Any = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE : Optional[int] = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE : List[str] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE : Dict = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , UpperCamelCase__ )
self.assertIn('''attention_mask''' , UpperCamelCase__ )
self.assertNotIn('''decoder_input_ids''' , UpperCamelCase__ )
self.assertNotIn('''decoder_attention_mask''' , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE : Optional[int] = [
'''Summary of the text.''',
'''Another summary.''',
]
SCREAMING_SNAKE_CASE : Dict = tokenizer(
text_target=UpperCamelCase__ , max_length=32 , padding='''max_length''' , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : int = ''' He is very happy, UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
shutil.rmtree(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Dict = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase__ )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
SCREAMING_SNAKE_CASE : Tuple = json.load(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
SCREAMING_SNAKE_CASE : List[Any] = json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = [f"""<extra_id_{i}>""" for i in range(125 )]
SCREAMING_SNAKE_CASE : int = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
SCREAMING_SNAKE_CASE : Optional[Any] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(UpperCamelCase__ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE : Any = tokenizer_class.from_pretrained(
UpperCamelCase__ , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE : Optional[int] = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=UpperCamelCase__ )]
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_class.from_pretrained(
UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '''�''' )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __A ( self : Optional[Any] ):
'''simple docstring'''
pass
def __A ( self : List[str] ):
'''simple docstring'''
pass
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
SCREAMING_SNAKE_CASE : int = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
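# The ids checked in the tests above follow a byte-level scheme: each UTF-8
# byte is shifted by the number of special tokens (6, inferred from the
# expected ids), with [CLS]=4 and [SEP]=5 wrapping the sequence. Minimal
# sketch, not the tokenizer's actual implementation:
def perceiver_encode(text):
    return [4] + [b + 6 for b in text.encode("utf-8")] + [5]

# perceiver_encode("Unicode €.") == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]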
| 34
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """deberta-v2"""
def __init__( self : Optional[Any] , UpperCamelCase__ : Any=12_8100 , UpperCamelCase__ : Optional[int]=1536 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : List[str]=24 , UpperCamelCase__ : Tuple=6144 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : List[Any]=1E-7 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=-1 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : str="gelu" , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = relative_attention
SCREAMING_SNAKE_CASE : Optional[Any] = max_relative_positions
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCamelCase__ ) == str:
SCREAMING_SNAKE_CASE : Optional[int] = [x.strip() for x in pos_att_type.lower().split('''|''' )]
SCREAMING_SNAKE_CASE : Any = pos_att_type
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''pooler_hidden_size''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pooler_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = pooler_hidden_act
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Dict , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
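# The backwards-compatibility branch in the config above turns a legacy string
# spec into a list, e.g. "c2p|p2c" -> ["c2p", "p2c"]. Minimal sketch (names
# illustrative):
def parse_pos_att_type(value):
    if isinstance(value, str):
        return [x.strip() for x in value.lower().split("|")]
    return value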
| 34
| 1
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def A ( _lowercase ):
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(_lowercase ):
return ext
raise Exception(
f"""Unable to determine file format from file extension {path}. """
f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
SCREAMING_SNAKE_CASE : Tuple = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format
SCREAMING_SNAKE_CASE : List[Any] = PipelineDataFormat.from_str(
format=_lowercase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(_lowercase , _lowercase )
class lowercase__ ( UpperCamelCase_):
def __init__( self : Optional[Any] , UpperCamelCase__ : Pipeline , UpperCamelCase__ : PipelineDataFormat ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = nlp
SCREAMING_SNAKE_CASE : List[Any] = reader
@staticmethod
def __A ( UpperCamelCase__ : ArgumentParser ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''' )
run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''' )
run_parser.add_argument('''--input''' , type=UpperCamelCase__ , help='''Path to the file to use for inference''' )
run_parser.add_argument('''--output''' , type=UpperCamelCase__ , help='''Path to the file that will be used post to write results.''' )
run_parser.add_argument('''--model''' , type=UpperCamelCase__ , help='''Name or path to the model to instantiate.''' )
run_parser.add_argument('''--config''' , type=UpperCamelCase__ , help='''Name or path to the model\'s config to instantiate.''' )
run_parser.add_argument(
'''--tokenizer''' , type=UpperCamelCase__ , help='''Name of the tokenizer to use. (default: same as the model name)''' )
run_parser.add_argument(
'''--column''' , type=UpperCamelCase__ , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
run_parser.add_argument(
'''--format''' , type=UpperCamelCase__ , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
run_parser.add_argument(
'''--device''' , type=UpperCamelCase__ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''' )
run_parser.set_defaults(func=UpperCamelCase__ )
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self._nlp, []
for entry in self._reader:
SCREAMING_SNAKE_CASE : str = nlp(**UpperCamelCase__ ) if self._reader.is_multi_columns else nlp(UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
outputs.append(UpperCamelCase__ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
SCREAMING_SNAKE_CASE : Any = self._reader.save_binary(UpperCamelCase__ )
logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(UpperCamelCase__ )
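# Sketch of the extension-based format inference performed above: an empty
# path means stdin/stdout ("pipe"), otherwise the file extension picks the
# reader. The supported formats listed here are an assumption for
# illustration.
def infer_format(path, supported=("json", "csv", "pipe")):
    if not path:
        return "pipe"
    for ext in supported:
        if path.endswith(ext):
            return ext
    raise ValueError(f"Unable to determine file format from file extension {path}")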
| 34
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Any = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Any = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {int(_lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE : Optional[int] = BitConfig(
conv_layer=_lowercase , num_labels=1_000 , idalabel=_lowercase , labelaid=_lowercase , )
return config
def A ( _lowercase ):
if "stem.conv" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
SCREAMING_SNAKE_CASE : str = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''bit.encoder.''' + name
return name
def A ( ):
SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def A ( _lowercase , _lowercase , _lowercase=False ):
SCREAMING_SNAKE_CASE : List[Any] = get_config(_lowercase )
# load original model from timm
SCREAMING_SNAKE_CASE : Optional[Any] = create_model(_lowercase , pretrained=_lowercase )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE : Optional[int] = timm_model.state_dict()
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Dict = state_dict.pop(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = val.squeeze() if '''head''' in key else val
# load HuggingFace model
SCREAMING_SNAKE_CASE : str = BitForImageClassification(_lowercase )
model.eval()
model.load_state_dict(_lowercase )
# create image processor
SCREAMING_SNAKE_CASE : Optional[Any] = create_transform(**resolve_data_config({} , model=_lowercase ) )
SCREAMING_SNAKE_CASE : List[str] = transform.transforms
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE : Tuple = BitImageProcessor(
do_resize=_lowercase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowercase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = transform(_lowercase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : Optional[int] = processor(_lowercase , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_lowercase , _lowercase )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE : List[Any] = timm_model(_lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
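# Intended effect of the rename_key mapping above (illustrative examples of
# timm-to-HuggingFace key names, not executable against the obfuscated code):
# "stem.conv.weight"      -> "bit.embedder.convolution.weight"
# "blocks.0.conv1.weight" -> "bit.encoder.layers.0.conv1.weight"
# "head.fc.weight"        -> "classifier.1.weight"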
| 34
| 1
|
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = 42
UpperCamelCase_ = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 34
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__UpperCamelCase : str = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase : int = logging.getLogger()
def A ( ):
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('''-f''' )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
return args.f
def A ( _lowercase , _lowercase="eval" ):
SCREAMING_SNAKE_CASE : Dict = os.path.join(_lowercase , f"""{split}_results.json""" )
if os.path.exists(_lowercase ):
with open(_lowercase , '''r''' ) as f:
return json.load(_lowercase )
raise ValueError(f"""can't find {path}""" )
__UpperCamelCase : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase__ ( UpperCamelCase_):
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Tuple = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_glue.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : str = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_clm_flax.main()
SCREAMING_SNAKE_CASE : Dict = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_summarization_flax.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Dict = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_mlm_flax.main()
SCREAMING_SNAKE_CASE : List[Any] = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_ta_mlm_flax.main()
SCREAMING_SNAKE_CASE : Optional[int] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
    @slow
    def test_run_ner(self):
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
        """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
        """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
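
# Each test above follows the same pattern: build an argv list for the example
# script, patch `sys.argv` with `unittest.mock.patch.object`, call the script's
# `main()` in-process, then read the metrics the run wrote to `output_dir`
# back out with `get_results`.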
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE : Any = {'''input_ids''': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
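
# A minimal usage sketch of the source/target split exercised above (assumes
# Hub access; the German target sentence is an illustrative placeholder):
#
#     tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#     batch = tok(["I am a small frog"], text_target=["Ich bin ein kleiner Frosch"], return_tensors="pt")
#     batch["input_ids"]  # source-side ids; batch["labels"] holds the target-side ids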
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
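
# e.g. floats_list((2, 3)) produces a 2x3 nested list of random floats in [0, scale).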
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count
def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
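
    # Example: 10 = 1**2 + 3**2 is the only way to write 10 as a sum of powers
    # of unique natural numbers with power 2, so this prints 1.
    print(solve(10, 2))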
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []
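
    # `step` below combines the model output with the running buffer `self.ets`
    # using linear multistep (Adams-Bashforth) coefficients: (3, -1)/2 for two
    # points, (23, -16, 5)/12 for three, and (55, -59, 37, -9)/24 for four,
    # which gives the fourth-order update once enough history has accumulated.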
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
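
# A minimal usage sketch: drive the scheduler through its timesteps with a
# denoiser. `set_timesteps` and `step` are the methods defined above; `model`
# and the sample shape are placeholder assumptions.
#
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 64, 64)
#     for t in scheduler.timesteps:
#         model_output = model(sample, t)  # hypothetical denoising model
#         sample = scheduler.step(model_output, t, sample).prev_sample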