| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82–53.2k chars) | int64 (0–721) | string (91–41.9k chars) | int64 (0–699) | int64 (0–1) |
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'YituTech/conv-bert-base': 512,
    'YituTech/conv-bert-medium-small': 512,
    'YituTech/conv-bert-small': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) ConvBERT tokenizer; WordPiece-based and interchangeable with the slow tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer state disagrees with the requested options, rebuild the normalizer.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Single sequence: [CLS] A [SEP]; pair of sequences: [CLS] A [SEP] B [SEP]
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # Token type ids are 0 for the first segment (and its special tokens) and 1 for the second segment.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
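A minimal usage sketch for the fast tokenizer above, assuming the public `YituTech/conv-bert-base` checkpoint is reachable; it shows how a sentence pair picks up the [CLS]/[SEP] special tokens and the 0/1 token type ids produced by the two helper methods.

from transformers import ConvBertTokenizerFast

tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
encoded = tokenizer("Hello world", "How are you?")
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))  # [CLS] ... [SEP] ... [SEP]
print(encoded["token_type_ids"])  # 0s for the first segment, 1s for the second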
| code_codestyle: 598 |
from math import factorial


def combinations(n, k):
    """Return n choose k, the number of ways to pick k items from a set of n."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
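As a quick sanity check of the formula: combinations(52, 5) expands to 52! / (5! * 47!) = 2,598,960, the familiar count of five-card poker hands, and combinations(10, 3) = 120.

assert combinations(52, 5) == 2_598_960
assert combinations(10, 3) == 120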
| style_context_codestyle: 379 | label: 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class a__ ( __lowercase ):
UpperCAmelCase__ = '''sew-d'''
def __init__( self :Optional[int] , _lowerCamelCase :List[str]=32 , _lowerCamelCase :Union[str, Any]=768 , _lowerCamelCase :str=12 , _lowerCamelCase :List[Any]=12 , _lowerCamelCase :Optional[Any]=3_072 , _lowerCamelCase :Optional[int]=2 , _lowerCamelCase :Optional[Any]=512 , _lowerCamelCase :int=256 , _lowerCamelCase :Union[str, Any]=True , _lowerCamelCase :str=True , _lowerCamelCase :Optional[int]=("p2c", "c2p") , _lowerCamelCase :Optional[int]="layer_norm" , _lowerCamelCase :Any="gelu_python" , _lowerCamelCase :int=0.1 , _lowerCamelCase :Optional[int]=0.1 , _lowerCamelCase :Any=0.1 , _lowerCamelCase :Any=0.0 , _lowerCamelCase :Optional[Any]=0.1 , _lowerCamelCase :int=0.02 , _lowerCamelCase :Dict=1E-7 , _lowerCamelCase :str=1E-5 , _lowerCamelCase :str="group" , _lowerCamelCase :Optional[int]="gelu" , _lowerCamelCase :List[Any]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _lowerCamelCase :Tuple=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _lowerCamelCase :int=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _lowerCamelCase :Dict=False , _lowerCamelCase :List[Any]=128 , _lowerCamelCase :str=16 , _lowerCamelCase :List[str]=True , _lowerCamelCase :Dict=0.05 , _lowerCamelCase :int=10 , _lowerCamelCase :Tuple=2 , _lowerCamelCase :str=0.0 , _lowerCamelCase :Dict=10 , _lowerCamelCase :int=0 , _lowerCamelCase :Any="mean" , _lowerCamelCase :Any=False , _lowerCamelCase :List[str]=False , _lowerCamelCase :int=256 , _lowerCamelCase :Tuple=0 , _lowerCamelCase :List[str]=1 , _lowerCamelCase :str=2 , **_lowerCamelCase :List[str] , ):
'''simple docstring'''
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
UpperCamelCase_ : Any =hidden_size
UpperCamelCase_ : Union[str, Any] =feat_extract_norm
UpperCamelCase_ : Union[str, Any] =feat_extract_activation
UpperCamelCase_ : Union[str, Any] =list(__a )
UpperCamelCase_ : str =list(__a )
UpperCamelCase_ : List[str] =list(__a )
UpperCamelCase_ : Dict =conv_bias
UpperCamelCase_ : Dict =num_conv_pos_embeddings
UpperCamelCase_ : Optional[Any] =num_conv_pos_embedding_groups
UpperCamelCase_ : Union[str, Any] =len(self.conv_dim )
UpperCamelCase_ : List[str] =num_hidden_layers
UpperCamelCase_ : Tuple =intermediate_size
UpperCamelCase_ : int =squeeze_factor
UpperCamelCase_ : Tuple =max_position_embeddings
UpperCamelCase_ : Any =position_buckets
UpperCamelCase_ : Dict =share_att_key
UpperCamelCase_ : int =relative_attention
UpperCamelCase_ : Tuple =norm_rel_ebd
UpperCamelCase_ : Optional[Any] =list(__a )
UpperCamelCase_ : Optional[int] =hidden_act
UpperCamelCase_ : str =num_attention_heads
UpperCamelCase_ : Tuple =hidden_dropout
UpperCamelCase_ : Optional[Any] =attention_dropout
UpperCamelCase_ : List[Any] =activation_dropout
UpperCamelCase_ : Any =feat_proj_dropout
UpperCamelCase_ : Dict =final_dropout
UpperCamelCase_ : Optional[Any] =layer_norm_eps
UpperCamelCase_ : Union[str, Any] =feature_layer_norm_eps
UpperCamelCase_ : Dict =initializer_range
UpperCamelCase_ : Dict =vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect.'
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase_ : Any =apply_spec_augment
UpperCamelCase_ : Dict =mask_time_prob
UpperCamelCase_ : Any =mask_time_length
UpperCamelCase_ : str =mask_time_min_masks
UpperCamelCase_ : int =mask_feature_prob
UpperCamelCase_ : int =mask_feature_length
UpperCamelCase_ : Optional[int] =mask_feature_min_masks
# ctc loss
UpperCamelCase_ : Dict =ctc_loss_reduction
UpperCamelCase_ : Optional[int] =ctc_zero_infinity
# sequence classification
UpperCamelCase_ : int =use_weighted_layer_sum
UpperCamelCase_ : Tuple =classifier_proj_size
@property
def lowerCamelCase_ ( self :str ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
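A short usage sketch for the configuration class above, assuming SEWDConfig is importable from the transformers top level (it is in recent releases). The stride product exposed by the property is the factor by which the feature extractor downsamples the raw waveform: 5 * 2^6 = 320 samples per output frame, about 20 ms at 16 kHz.

from transformers import SEWDConfig

config = SEWDConfig()  # defaults as in the class above
print(config.num_feat_extract_layers)  # 13 convolutional layers
print(config.inputs_to_logits_ratio)   # 320 input samples per logit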
| code_codestyle: 720 |
"""simple docstring"""
def A_ ( __lowercase ):
UpperCamelCase_ : List[str] =''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def A_ ( __lowercase ):
UpperCamelCase_ : int =[chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
UpperCamelCase_ : Any =remove_duplicates(key.upper() )
UpperCamelCase_ : int =len(__lowercase )
# First fill cipher with key characters
UpperCamelCase_ : Union[str, Any] ={alphabet[i]: char for i, char in enumerate(__lowercase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(__lowercase ) , 26 ):
UpperCamelCase_ : List[Any] =alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
UpperCamelCase_ : str =alphabet[i - offset]
UpperCamelCase_ : int =char
return cipher_alphabet
def A_ ( __lowercase , __lowercase ):
return "".join(cipher_map.get(__lowercase , __lowercase ) for ch in message.upper() )
def A_ ( __lowercase , __lowercase ):
UpperCamelCase_ : str ={v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(__lowercase , __lowercase ) for ch in message.upper() )
def A_ ( ):
UpperCamelCase_ : Tuple =input('Enter message to encode or decode: ' ).strip()
UpperCamelCase_ : int =input('Enter keyword: ' ).strip()
UpperCamelCase_ : List[Any] =input('Encipher or decipher? E/D:' ).strip()[0].lower()
try:
UpperCamelCase_ : List[str] ={'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('invalid input option' )
UpperCamelCase_ : List[Any] =create_cipher_map(__lowercase )
print(func(__lowercase , __lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
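A small non-interactive round-trip check for the keyword cipher above; non-letters pass through unchanged, and deciphering with the same map recovers the uppercased plaintext.

cipher_map = create_cipher_map("Goodbye!!")
secret = encipher("Hello World!!", cipher_map)
assert decipher(secret, cipher_map) == "HELLO WORLD!!"
print(secret)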
| style_context_codestyle: 395 | label: 0 |
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def apply_forward_hook(method):
    """
    Decorator that runs a registered accelerate offload hook's `pre_forward` before an arbitrary method, so that
    methods other than `forward` also move the module back to its execution device. A no-op when accelerate is
    missing or too old to support it.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
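A hedged sketch of how a decorator like this is typically applied: public methods that should behave like `forward` with respect to accelerate's CPU-offload hooks get wrapped, so calling them triggers the hook first. The TinyVAE class below is illustrative only, not part of the snippet.

import torch
from torch import nn


class TinyVAE(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = nn.Linear(8, 4)
        self.decoder = nn.Linear(4, 8)

    @apply_forward_hook
    def encode(self, x: torch.Tensor) -> torch.Tensor:
        # pre_forward on the offload hook (if present) runs before this body
        return self.encoder(x)

    @apply_forward_hook
    def decode(self, z: torch.Tensor) -> torch.Tensor:
        return self.decoder(z)


vae = TinyVAE()
latent = vae.encode(torch.randn(2, 8))  # shape (2, 4)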
| code_codestyle: 8 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ : str = 250004
A_ : str = 250020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MBartTokenizer
lowerCamelCase__ = MBartTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case__ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case__ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case__ : Optional[int] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tempfile.mkdtemp()
snake_case__ : int = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case__ : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : Tuple = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
snake_case__ : Any = tempfile.mkdtemp()
snake_case__ : Optional[int] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : List[Any] = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
snake_case__ : Dict = tempfile.mkdtemp()
snake_case__ : Union[str, Any] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case__ : Dict = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = '''facebook/mbart-large-en-ro'''
lowerCamelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
lowerCamelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
lowerCamelCase__ = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def __UpperCamelCase ( cls ):
snake_case__ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
snake_case__ : Any = 1
return cls
def __UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
snake_case__ : List[str] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
snake_case__ : List[Any] = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = 1_0
snake_case__ : int = self.tokenizer(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = tempfile.mkdtemp()
snake_case__ : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = MBartTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __SCREAMING_SNAKE_CASE )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
snake_case__ : List[str] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
snake_case__ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors="""pt""" )
snake_case__ : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1_0 , return_tensors="""pt""" )
snake_case__ : str = targets["""input_ids"""]
snake_case__ : Optional[Any] = shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
| style_context_codestyle: 38 | label: 0 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        # The Python reference implementation matches the built-in GELU but not the "new" (tanh) variant.
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        # gelu_10 clips the output at 10 and matches plain GELU below the clipping threshold.
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        # Setting an attribute on one instance must not leak to a freshly returned instance.
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
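For context, a tiny sketch of the API these tests exercise: get_activation returns a fresh nn.Module instance keyed by name, so it can be dropped straight into a model definition.

import torch
from transformers.activations import get_activation

act = get_activation("gelu_new")
x = torch.linspace(-3, 3, steps=7)
print(act(x))  # the tanh-approximated GELU evaluated at 7 points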
| code_codestyle: 704 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
snake_case_ : List[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
snake_case_ : Tuple = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
snake_case_ : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __a ( __UpperCAmelCase : str , __UpperCAmelCase : str ) -> tuple[str, float]:
"""simple docstring"""
lowerCamelCase_ : Tuple = len([g for position, g in enumerate(__UpperCAmelCase ) if g == main_target[position]] )
return (item, float(__UpperCAmelCase ))
def __a ( __UpperCAmelCase : str , __UpperCAmelCase : str ) -> tuple[str, str]:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = random.randint(0 , len(__UpperCAmelCase ) - 1 )
lowerCamelCase_ : Union[str, Any] = parent_a[:random_slice] + parent_a[random_slice:]
lowerCamelCase_ : str = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __a ( __UpperCAmelCase : str , __UpperCAmelCase : list[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ : Dict = list(__UpperCAmelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
lowerCamelCase_ : Optional[int] = random.choice(__UpperCAmelCase )
return "".join(__UpperCAmelCase )
def __a ( __UpperCAmelCase : tuple[str, float] , __UpperCAmelCase : list[tuple[str, float]] , __UpperCAmelCase : list[str] , ) -> list[str]:
"""simple docstring"""
lowerCamelCase_ : Tuple = []
# Generate more children proportionally to the fitness score.
lowerCamelCase_ : str = int(parent_a[1] * 100 ) + 1
lowerCamelCase_ : List[str] = 10 if child_n >= 10 else child_n
for _ in range(__UpperCAmelCase ):
lowerCamelCase_ : str = population_score[random.randint(0 , __UpperCAmelCase )][0]
lowerCamelCase_ , lowerCamelCase_ : str = crossover(parent_a[0] , __UpperCAmelCase )
# Append new string to the population list.
pop.append(mutate(__UpperCAmelCase , __UpperCAmelCase ) )
pop.append(mutate(__UpperCAmelCase , __UpperCAmelCase ) )
return pop
def __a ( __UpperCAmelCase : str , __UpperCAmelCase : list[str] , __UpperCAmelCase : bool = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
lowerCamelCase_ : Tuple = f"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(__UpperCAmelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
lowerCamelCase_ : int = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowerCamelCase_ : Optional[int] = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(__UpperCAmelCase )
# Generate random starting population.
lowerCamelCase_ : int = []
for _ in range(__UpperCAmelCase ):
population.append("".join([random.choice(__UpperCAmelCase ) for i in range(len(__UpperCAmelCase ) )] ) )
# Just some logs to know what the algorithms is doing.
lowerCamelCase_ , lowerCamelCase_ : List[str] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__UpperCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowerCamelCase_ : int = [evaluate(__UpperCAmelCase , __UpperCAmelCase ) for item in population]
# Check if there is a matching evolution.
lowerCamelCase_ : Any = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x[1] , reverse=__UpperCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"\nGeneration: {generation}"
f"\nTotal Population:{total_population}"
f"\nBest score: {population_score[0][1]}"
f"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
lowerCamelCase_ : List[Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(__UpperCAmelCase )
# Normalize population score to be between 0 and 1.
lowerCamelCase_ : Optional[int] = [
(item, score / len(__UpperCAmelCase )) for item, score in population_score
]
# This is selection
for i in range(__UpperCAmelCase ):
population.extend(select(population_score[int(__UpperCAmelCase )] , __UpperCAmelCase , __UpperCAmelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(__UpperCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
snake_case_ : str = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
snake_case_ : Optional[int] = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = basic(target_str, genes_list)
print(
f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
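A quick non-interactive way to exercise the evolver with a much shorter target, assuming the driver is exposed as basic(target, genes, debug) as the __main__ block suggests; short targets over a small gene set converge in a handful of generations.

genes = list(" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz")
generation, population, best = basic("Hello World", genes, debug=False)
print(generation, population, best)  # best should equal "Hello World" once the loop exits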
| style_context_codestyle: 253 | label: 0 |
def validate_initial_digits(credit_card_number: str) -> bool:
    """A plausible card number starts with one of the known issuer prefixes."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Run the Luhn checksum over the card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print and return whether the card number passes all checks."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
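A concrete check of the Luhn step above: for 4111111111111111, doubling every second digit from the right turns the leading 4 into 8 and seven of the 1s into 2s, giving a checksum of 8 + 7*2 + 8*1 = 30, which is divisible by 10; flipping the last digit breaks it.

assert luhn_validation("4111111111111111") is True   # checksum 30
assert luhn_validation("4111111111111112") is False  # checksum 31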
| code_codestyle: 479 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
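A brief illustration of what the lazy-module pattern above buys: importing the package is cheap, and the heavy torch/TF/Flax modules listed in _import_structure are only loaded when one of the exported names is first accessed.

import transformers.models.roberta as roberta_pkg

# Nothing heavy has been imported yet; attribute access triggers the real import.
model_cls = roberta_pkg.RobertaForMaskedLM  # modeling_roberta (and torch) load here, on first use
print(model_cls.__module__)  # transformers.models.roberta.modeling_roberta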
| style_context_codestyle: 602 | label: 0 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class UpperCamelCase ( _snake_case ):
def __init__(self : Any , _A : str = "▁" , _A : bool = True , _A : Union[str, AddedToken] = "<unk>" , _A : Union[str, AddedToken] = "</s>" , _A : Union[str, AddedToken] = "<pad>" , ) -> Optional[Any]:
__snake_case : Dict = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
__snake_case : Dict = [None] * len(self.special_tokens)
for token_dict in self.special_tokens.values():
__snake_case : Optional[int] = token_dict['token']
__snake_case : Optional[int] = Tokenizer(Unigram())
__snake_case : Dict = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}') , ' '),
normalizers.Lowercase(),
])
__snake_case : Tuple = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__),
pre_tokenizers.Digits(individual_digits=lowerCAmelCase__),
pre_tokenizers.Punctuation(),
])
__snake_case : Union[str, Any] = decoders.Metaspace(replacement=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__)
__snake_case : str = TemplateProcessing(
single=f"$A {self.special_tokens['eos']['token']}" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
__snake_case : Tuple = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(lowerCAmelCase__ , lowerCAmelCase__)
def _lowercase (self : Any , _A : Union[str, List[str]] , _A : int = 80_00 , _A : bool = True , ) -> List[str]:
__snake_case : Optional[int] = trainers.UnigramTrainer(
vocab_size=lowerCAmelCase__ , special_tokens=self.special_tokens_list , show_progress=lowerCAmelCase__ , )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
__snake_case : List[Any] = [files]
self._tokenizer.train(lowerCAmelCase__ , trainer=lowerCAmelCase__)
self.add_unk_id()
def _lowercase (self : Tuple , _A : Union[Iterator[str], Iterator[Iterator[str]]] , _A : int = 80_00 , _A : bool = True , ) -> List[Any]:
__snake_case : List[str] = trainers.UnigramTrainer(
vocab_size=lowerCAmelCase__ , special_tokens=self.special_tokens_list , show_progress=lowerCAmelCase__ , )
self._tokenizer.train_from_iterator(lowerCAmelCase__ , trainer=lowerCAmelCase__)
self.add_unk_id()
def _lowercase (self : List[Any]) -> Dict:
__snake_case : int = json.loads(self._tokenizer.to_str())
__snake_case : Tuple = self.special_tokens['unk']['id']
__snake_case : Union[str, Any] = Tokenizer.from_str(json.dumps(lowerCAmelCase__))
| code_codestyle: 701 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
_a : Tuple= "docs/source/en/_toctree.yml"
def __UpperCAmelCase ( UpperCAmelCase_ : Optional[Any] ) -> Dict:
'''simple docstring'''
__snake_case : Union[str, Any] = defaultdict(UpperCAmelCase_ )
__snake_case : List[str] = []
__snake_case : Tuple = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(UpperCAmelCase_ )
__snake_case : Any = new_doc_list
__snake_case : Dict = [key for key, value in counts.items() if value > 1]
__snake_case : Optional[int] = []
for duplicate_key in duplicates:
__snake_case : Tuple = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(UpperCAmelCase_ ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
__snake_case : List[Any] = sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(UpperCAmelCase_ ) > 1:
raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )
overview_doc.extend(UpperCAmelCase_ )
# Sort
return overview_doc
def __UpperCAmelCase ( UpperCAmelCase_ : Optional[int]=False ) -> str:
'''simple docstring'''
with open(UpperCAmelCase_ , encoding='utf-8' ) as f:
__snake_case : List[Any] = yaml.safe_load(f.read() )
# Get to the API doc
__snake_case : str = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__snake_case : Union[str, Any] = content[api_idx]['sections']
# Then to the model doc
__snake_case : Dict = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
__snake_case : Optional[Any] = api_doc[scheduler_idx]['sections']
__snake_case : Tuple = clean_doc_toc(UpperCAmelCase_ )
__snake_case : Tuple = False
if new_scheduler_doc != scheduler_doc:
__snake_case : Optional[int] = True
if overwrite:
__snake_case : Tuple = new_scheduler_doc
if diff:
if overwrite:
__snake_case : Optional[int] = api_doc
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def __UpperCAmelCase ( UpperCAmelCase_ : Any=False ) -> Union[str, Any]:
'''simple docstring'''
with open(UpperCAmelCase_ , encoding='utf-8' ) as f:
__snake_case : Dict = yaml.safe_load(f.read() )
# Get to the API doc
__snake_case : Dict = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__snake_case : Dict = content[api_idx]['sections']
# Then to the model doc
__snake_case : Dict = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
__snake_case : Optional[int] = False
__snake_case : Optional[Any] = api_doc[pipeline_idx]['sections']
__snake_case : Tuple = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
__snake_case : List[str] = pipeline_doc['section']
__snake_case : Optional[int] = clean_doc_toc(UpperCAmelCase_ )
if overwrite:
__snake_case : List[Any] = new_sub_pipeline_doc
new_pipeline_docs.append(UpperCAmelCase_ )
# sort overall pipeline doc
__snake_case : List[Any] = clean_doc_toc(UpperCAmelCase_ )
if new_pipeline_docs != pipeline_docs:
__snake_case : Dict = True
if overwrite:
__snake_case : Tuple = new_pipeline_docs
if diff:
if overwrite:
__snake_case : Union[str, Any] = api_doc
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
_a : Optional[int]= argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_a : List[str]= parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| style_context_codestyle: 192 | label: 0 |
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
UpperCAmelCase = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Union[str, Any] , __lowercase : Optional[Any] = 101 ):
"""simple docstring"""
__lowercase =length
def __len__( self : Tuple ):
"""simple docstring"""
return self.length
def __getitem__( self : str , __lowercase : Any ):
"""simple docstring"""
return i
class lowerCAmelCase :
def __call__( self : Tuple , __lowercase : int ):
"""simple docstring"""
return {"input_ids": torch.tensor(a__ ), "labels": torch.tensor(a__ )}
class lowerCAmelCase ( nn.Module ):
def __init__( self : Dict ):
"""simple docstring"""
super().__init__()
# Add some (unused) params otherwise DDP will complain.
__lowercase =nn.Linear(120 , 80 )
def snake_case ( self : List[Any] , __lowercase : Optional[int] , __lowercase : Any=None ):
"""simple docstring"""
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
@require_torch_neuroncore
def snake_case ( self : str ):
"""simple docstring"""
__lowercase =f'''--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '''.split()
__lowercase =self.get_auto_remove_tmp_dir()
__lowercase =f'''--output_dir {output_dir}'''.split()
__lowercase =["""torchrun"""] + distributed_args + args
execute_subprocess_async(a__ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
@require_torch_multi_gpu
def snake_case ( self : Tuple ):
"""simple docstring"""
__lowercase =f'''--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '''.split()
__lowercase =self.get_auto_remove_tmp_dir()
__lowercase =f'''--output_dir {output_dir}'''.split()
__lowercase =["""torchrun"""] + distributed_args + args
execute_subprocess_async(a__ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
UpperCAmelCase = HfArgumentParser((TrainingArguments,))
UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
UpperCAmelCase = DummyDataset(dataset_length)
def __UpperCamelCase ( lowercase__ : EvalPrediction ):
'''simple docstring'''
__lowercase =list(range(len(_lowerCamelCase ) ) )
__lowercase =p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'Predictions and/or labels do not match expected results:\n - predictions: '
F'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' )
return {"success": success}
UpperCAmelCase = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
UpperCAmelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
UpperCAmelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
UpperCAmelCase = 2
UpperCAmelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
UpperCAmelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
UpperCAmelCase = None
| code_codestyle: 119 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
_UpperCamelCase : int = "ssube/stable-diffusion-x4-upscaler-onnx"
def __A ( self , a__=0 ):
_lowerCAmelCase : Optional[int] = floats_tensor((1, 3, 128, 128) , rng=random.Random(a__ ) )
_lowerCAmelCase : List[Any] = torch.manual_seed(a__ )
_lowerCAmelCase : Any = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs()
_lowerCAmelCase : Any = pipe(**a__ ).images
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : List[Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def __A ( self ):
_lowerCAmelCase : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Dict = self.get_dummy_inputs()
_lowerCAmelCase : Any = pipe(**a__ ).images
_lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Any = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __A ( self ):
_lowerCAmelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = self.get_dummy_inputs()
_lowerCAmelCase : str = pipe(**a__ ).images
_lowerCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Any = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Any = self.get_dummy_inputs()
_lowerCAmelCase : str = pipe(**a__ ).images
_lowerCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : List[str] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __A ( self ):
_lowerCAmelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase : Tuple = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs()
_lowerCAmelCase : Any = pipe(**a__ ).images
_lowerCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Dict = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __A ( unittest.TestCase ):
@property
def __A ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __A ( self ):
_lowerCAmelCase : str = ort.SessionOptions()
_lowerCAmelCase : Tuple = False
return options
def __A ( self ):
_lowerCAmelCase : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_lowerCAmelCase : Any = init_image.resize((128, 128) )
# using the PNDM scheduler by default
_lowerCAmelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = """A fantasy landscape, trending on artstation"""
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : Dict = pipe(
prompt=a__ , image=a__ , guidance_scale=7.5 , num_inference_steps=10 , generator=a__ , output_type="""np""" , )
_lowerCAmelCase : List[str] = output.images
_lowerCAmelCase : Tuple = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
_lowerCAmelCase : Dict = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def __A ( self ):
_lowerCAmelCase : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_lowerCAmelCase : Union[str, Any] = init_image.resize((128, 128) )
_lowerCAmelCase : int = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
_lowerCAmelCase : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : List[Any] = """A fantasy landscape, trending on artstation"""
_lowerCAmelCase : List[Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(
prompt=a__ , image=a__ , guidance_scale=7.5 , num_inference_steps=20 , generator=a__ , output_type="""np""" , )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Optional[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
_lowerCAmelCase : Optional[Any] = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
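All three checks above follow the same regression pattern: run the pipeline once, slice a small window out of the output array, and compare it element-wise against recorded reference values with a loose tolerance. A minimal, self-contained sketch of that pattern (the arrays below are placeholders, not values from a real pipeline run):

import numpy as np

images = np.random.rand(1, 512, 512, 3)          # stand-in for pipe(...).images
image_slice = images[0, 255:258, 383:386, -1]    # 3x3 window of the last channel
expected_slice = image_slice.flatten()           # in a real test: nine hard-coded constants
assert images.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2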
| 213
| 0
|
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class _UpperCamelCase :
def __init__( self :str , lowerCamelCase :Optional[int] , lowerCamelCase :Any=13 , lowerCamelCase :Tuple=7 , lowerCamelCase :List[str]=True , lowerCamelCase :int=True , lowerCamelCase :int=True , lowerCamelCase :Tuple=True , lowerCamelCase :Optional[int]=99 , lowerCamelCase :Union[str, Any]=64 , lowerCamelCase :Any=5 , lowerCamelCase :List[Any]=4 , lowerCamelCase :Tuple=37 , lowerCamelCase :Tuple="gelu" , lowerCamelCase :Optional[int]=0.1 , lowerCamelCase :List[Any]=0.1 , lowerCamelCase :Dict=512 , lowerCamelCase :List[str]=16 , lowerCamelCase :List[Any]=2 , lowerCamelCase :List[str]=0.02 , lowerCamelCase :int=3 , lowerCamelCase :str=4 , lowerCamelCase :Tuple=None , ) -> List[Any]:
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_input_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = num_choices
UpperCAmelCase__ = scope
UpperCAmelCase__ = vocab_size - 1
def UpperCAmelCase_ ( self :Optional[Any] ) -> List[Any]:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase_ ( self :Optional[int] ) -> Any:
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def UpperCAmelCase_ ( self :Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ = True
return config, input_ids, input_mask, token_labels
def UpperCAmelCase_ ( self :str , lowerCamelCase :List[str] , lowerCamelCase :List[Any] , lowerCamelCase :List[str] ) -> str:
UpperCAmelCase__ = GPTNeoXModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase , attention_mask=lowerCamelCase )
UpperCAmelCase__ = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self :int , lowerCamelCase :Any , lowerCamelCase :List[Any] , lowerCamelCase :Tuple ) -> Union[str, Any]:
UpperCAmelCase__ = True
UpperCAmelCase__ = GPTNeoXModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase , attention_mask=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self :List[Any] , lowerCamelCase :Dict , lowerCamelCase :Union[str, Any] , lowerCamelCase :List[str] , lowerCamelCase :Union[str, Any] ) -> List[Any]:
UpperCAmelCase__ = GPTNeoXForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self :Union[str, Any] , lowerCamelCase :str , lowerCamelCase :Optional[Any] , lowerCamelCase :Union[str, Any] , lowerCamelCase :Tuple ) -> Any:
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = GPTNeoXForQuestionAnswering(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase , attention_mask=lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self :str , lowerCamelCase :Union[str, Any] , lowerCamelCase :int , lowerCamelCase :Dict , lowerCamelCase :Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = GPTNeoXForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self :Optional[int] , lowerCamelCase :str , lowerCamelCase :Dict , lowerCamelCase :str , lowerCamelCase :List[Any] ) -> Any:
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = GPTNeoXForTokenClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self :Optional[int] , lowerCamelCase :Optional[int] , lowerCamelCase :str , lowerCamelCase :Dict ) -> List[str]:
UpperCAmelCase__ = True
UpperCAmelCase__ = GPTNeoXForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
# first forward pass
UpperCAmelCase__ = model(lowerCamelCase , attention_mask=lowerCamelCase , use_cache=lowerCamelCase )
UpperCAmelCase__ = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
UpperCAmelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
UpperCAmelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase__ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase__ = model(lowerCamelCase , attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase )
UpperCAmelCase__ = output_from_no_past["hidden_states"][0]
UpperCAmelCase__ = model(
lowerCamelCase , attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
# select random slice
UpperCAmelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-3 ) )
def UpperCAmelCase_ ( self :Tuple ) -> Tuple:
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
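# The cached-generation check above reduces to: run the prefix once with use_cache=True,
# then feed only the new tokens plus the cache, and the result must match a full forward
# pass over the concatenated sequence. A minimal sketch with an arbitrary tiny config
# (the sizes here are illustrative, not the values used by the tester):
import torch
from transformers import GPTNeoXConfig, GPTNeoXForCausalLM

_cfg = GPTNeoXConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2,
                     num_attention_heads=4, intermediate_size=37)
_model = GPTNeoXForCausalLM(_cfg).eval()
_ids = torch.randint(0, 99, (1, 8))
_next = torch.randint(0, 99, (1, 3))
with torch.no_grad():
    _past = _model(_ids, use_cache=True).past_key_values
    _cached = _model(_next, past_key_values=_past).logits
    _full = _model(torch.cat([_ids, _next], dim=-1)).logits
assert torch.allclose(_full[:, -3:], _cached, atol=1e-3)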
@require_torch
class _UpperCamelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
UpperCAmelCase_ = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ = (GPTNeoXForCausalLM,) if is_torch_available() else ()
UpperCAmelCase_ = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def UpperCAmelCase_ ( self :int ) -> Any:
UpperCAmelCase__ = GPTNeoXModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=lowerCamelCase , hidden_size=64 , num_attention_heads=8 )
def UpperCAmelCase_ ( self :int ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self :List[str] ) -> Union[str, Any]:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def UpperCAmelCase_ ( self :str ) -> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def UpperCAmelCase_ ( self :List[Any] ) -> Any:
# This regression test was failing with PyTorch < 1.3
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase__ = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def UpperCAmelCase_ ( self :Any ) -> Optional[int]:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def UpperCAmelCase_ ( self :Optional[int] ) -> Union[str, Any]:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase )
def UpperCAmelCase_ ( self :int ) -> Optional[Any]:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase )
def UpperCAmelCase_ ( self :Any ) -> Dict:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase )
def UpperCAmelCase_ ( self :int ) -> Any:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase )
@unittest.skip(reason="Feed forward chunking is not implemented" )
def UpperCAmelCase_ ( self :Any ) -> Union[str, Any]:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def UpperCAmelCase_ ( self :Any , lowerCamelCase :Optional[Any] ) -> Tuple:
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase__ = GPTNeoXModel(lowerCamelCase )
original_model.to(lowerCamelCase )
original_model.eval()
UpperCAmelCase__ = original_model(lowerCamelCase ).last_hidden_state
UpperCAmelCase__ = original_model(lowerCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase__ = {"type": scaling_type, "factor": 10.0}
UpperCAmelCase__ = GPTNeoXModel(lowerCamelCase )
scaled_model.to(lowerCamelCase )
scaled_model.eval()
UpperCAmelCase__ = scaled_model(lowerCamelCase ).last_hidden_state
UpperCAmelCase__ = scaled_model(lowerCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-5 ) )
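# The scaling test above builds one model with plain rotary embeddings and one with a
# scaled variant, then compares short-input behaviour. Assuming the usual `rope_scaling`
# config attribute (the obfuscated assignment above does not show the attribute name),
# the setup looks roughly like:
from transformers import GPTNeoXConfig

_base = GPTNeoXConfig(hidden_size=64, num_attention_heads=8, num_hidden_layers=2)
_scaled = GPTNeoXConfig(hidden_size=64, num_attention_heads=8, num_hidden_layers=2,
                        rope_scaling={"type": "linear", "factor": 10.0})
# "linear" scaling changes outputs even for short inputs; "dynamic" scaling only differs
# once the input exceeds the original max_position_embeddings, as the assertions encode.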
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self :Tuple ) -> Optional[int]:
UpperCAmelCase__ = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped" )
for checkpointing in [True, False]:
UpperCAmelCase__ = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase )
UpperCAmelCase__ = tokenizer("My favorite food is" , return_tensors="pt" ).to(lowerCamelCase )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
UpperCAmelCase__ = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
UpperCAmelCase__ = model.generate(**lowerCamelCase , do_sample=lowerCamelCase , max_new_tokens=20 )
UpperCAmelCase__ = tokenizer.batch_decode(lowerCamelCase )[0]
self.assertEqual(lowerCamelCase , lowerCamelCase )
| 364
|
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=lowerCAmelCase ):
UpperCAmelCase_ = ["""flax"""]
def __init__( self :List[Any] , *lowerCamelCase :int , **lowerCamelCase :List[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :Union[str, Any] , *lowerCamelCase :Any , **lowerCamelCase :List[str] ) -> int:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :Optional[int] , *lowerCamelCase :List[str] , **lowerCamelCase :List[Any] ) -> Dict:
requires_backends(cls , ["flax"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase ):
UpperCAmelCase_ = ["""flax"""]
def __init__( self :Dict , *lowerCamelCase :int , **lowerCamelCase :str ) -> str:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :Optional[int] , *lowerCamelCase :Tuple , **lowerCamelCase :List[Any] ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :Optional[Any] , *lowerCamelCase :Dict , **lowerCamelCase :Optional[int] ) -> List[Any]:
requires_backends(cls , ["flax"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase ):
UpperCAmelCase_ = ["""flax"""]
def __init__( self :List[str] , *lowerCamelCase :Union[str, Any] , **lowerCamelCase :Any ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :List[Any] , *lowerCamelCase :str , **lowerCamelCase :List[Any] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :str , *lowerCamelCase :Any , **lowerCamelCase :Dict ) -> int:
requires_backends(cls , ["flax"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase ):
UpperCAmelCase_ = ["""flax"""]
def __init__( self :Union[str, Any] , *lowerCamelCase :int , **lowerCamelCase :Dict ) -> List[Any]:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :int , *lowerCamelCase :Union[str, Any] , **lowerCamelCase :str ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :List[str] , *lowerCamelCase :Optional[int] , **lowerCamelCase :Tuple ) -> Dict:
requires_backends(cls , ["flax"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase ):
UpperCAmelCase_ = ["""flax"""]
def __init__( self :Union[str, Any] , *lowerCamelCase :Any , **lowerCamelCase :List[str] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :Optional[Any] , *lowerCamelCase :Union[str, Any] , **lowerCamelCase :Optional[Any] ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :List[Any] , *lowerCamelCase :Dict , **lowerCamelCase :Optional[int] ) -> List[str]:
requires_backends(cls , ["flax"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase ):
UpperCAmelCase_ = ["""flax"""]
def __init__( self :Tuple , *lowerCamelCase :str , **lowerCamelCase :List[str] ) -> List[Any]:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :Any , *lowerCamelCase :List[str] , **lowerCamelCase :Any ) -> int:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :List[Any] , *lowerCamelCase :List[Any] , **lowerCamelCase :Optional[Any] ) -> Tuple:
requires_backends(cls , ["flax"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase ):
UpperCAmelCase_ = ["""flax"""]
def __init__( self :Any , *lowerCamelCase :List[Any] , **lowerCamelCase :List[str] ) -> List[str]:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :Optional[Any] , *lowerCamelCase :Optional[int] , **lowerCamelCase :Dict ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :int , *lowerCamelCase :List[Any] , **lowerCamelCase :str ) -> str:
requires_backends(cls , ["flax"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase ):
UpperCAmelCase_ = ["""flax"""]
def __init__( self :str , *lowerCamelCase :Any , **lowerCamelCase :Union[str, Any] ) -> Optional[Any]:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :Optional[Any] , *lowerCamelCase :Any , **lowerCamelCase :List[str] ) -> int:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :List[Any] , *lowerCamelCase :Union[str, Any] , **lowerCamelCase :Union[str, Any] ) -> List[Any]:
requires_backends(cls , ["flax"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase ):
UpperCAmelCase_ = ["""flax"""]
def __init__( self :Dict , *lowerCamelCase :Dict , **lowerCamelCase :int ) -> List[Any]:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :Tuple , *lowerCamelCase :Optional[Any] , **lowerCamelCase :Optional[int] ) -> Dict:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :Any , *lowerCamelCase :Optional[int] , **lowerCamelCase :List[str] ) -> List[Any]:
requires_backends(cls , ["flax"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase ):
UpperCAmelCase_ = ["""flax"""]
def __init__( self :List[Any] , *lowerCamelCase :int , **lowerCamelCase :Optional[Any] ) -> List[str]:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :int , *lowerCamelCase :List[Any] , **lowerCamelCase :List[Any] ) -> Any:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :str , *lowerCamelCase :Optional[int] , **lowerCamelCase :str ) -> List[str]:
requires_backends(cls , ["flax"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase ):
UpperCAmelCase_ = ["""flax"""]
def __init__( self :List[Any] , *lowerCamelCase :str , **lowerCamelCase :int ) -> List[Any]:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :List[str] , *lowerCamelCase :List[Any] , **lowerCamelCase :Any ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :Optional[Any] , *lowerCamelCase :Any , **lowerCamelCase :Tuple ) -> int:
requires_backends(cls , ["flax"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase ):
UpperCAmelCase_ = ["""flax"""]
def __init__( self :Any , *lowerCamelCase :List[Any] , **lowerCamelCase :Tuple ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :List[Any] , *lowerCamelCase :int , **lowerCamelCase :Union[str, Any] ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :str , *lowerCamelCase :int , **lowerCamelCase :List[str] ) -> List[str]:
requires_backends(cls , ["flax"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase ):
UpperCAmelCase_ = ["""flax"""]
def __init__( self :int , *lowerCamelCase :Dict , **lowerCamelCase :Optional[int] ) -> str:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :Union[str, Any] , *lowerCamelCase :Any , **lowerCamelCase :Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls :int , *lowerCamelCase :Union[str, Any] , **lowerCamelCase :Any ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
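Every class in this file is a stub: combined with the DummyObject metaclass, requires_backends turns any attempt to construct the class or to call its loader methods into an informative error when flax is missing. A rough, simplified sketch of the mechanism (not the actual utils implementation):

class DummyObject(type):
    # any attribute access on the class itself reports the missing backend
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the flax library, which is not installed.")

class FlaxSomething(metaclass=DummyObject):
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the flax library, which is not installed.")

# FlaxSomething.from_pretrained("...")  -> ImportError raised by the metaclass
# FlaxSomething()                       -> ImportError raised by __init__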
| 364
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_swinv2'] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinv2 import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
Swinv2ForImageClassification,
Swinv2ForMaskedImageModeling,
Swinv2Model,
Swinv2PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
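With that last line, importing the package is cheap: only the structure above is registered, and each submodule is loaded the first time one of its attributes is accessed (assuming the usual transformers package layout):

from transformers import Swinv2Config    # resolves configuration_swinv2 on first access
# from transformers import Swinv2Model   # would load modeling_swinv2, which requires torch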
| 526
|
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase: Union[str, Any] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split ( _A ):
matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , _A )
return [m.group(0 ) for m in matches]
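# For reference, a quick sanity check of the splitter on a typical class name
# (illustrative, not part of the original script):
#
#     >>> camel_case_split("GPTNeoXForCausalLM")
#     ['GPT', 'Neo', 'X', 'For', 'Causal', 'LM']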
def get_frameworks_table ( ):
a : Union[str, Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
a : Union[str, Any] = {
config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
a : Union[str, Any] = collections.defaultdict(_A )
a : Optional[Any] = collections.defaultdict(_A )
a : int = collections.defaultdict(_A )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(_A ):
a : Optional[Any] = None
if _re_tf_models.match(_A ) is not None:
a : List[str] = tf_models
a : str = _re_tf_models.match(_A ).groups()[0]
elif _re_flax_models.match(_A ) is not None:
a : Optional[int] = flax_models
a : List[Any] = _re_flax_models.match(_A ).groups()[0]
elif _re_pt_models.match(_A ) is not None:
a : int = pt_models
a : List[str] = _re_pt_models.match(_A ).groups()[0]
if lookup_dict is not None:
while len(_A ) > 0:
if attr_name in model_prefix_to_model_type:
a : Optional[Any] = True
break
# Try again after removing the last word in the name
a : List[str] = ''.join(camel_case_split(_A )[:-1] )
a : Any = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
a : int = list(_A )
all_models.sort()
a : Optional[Any] = {'model_type': all_models}
a : List[str] = [pt_models[t] for t in all_models]
a : str = [tf_models[t] for t in all_models]
a : int = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to pick a default processing class for each model.
a : Any = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
a : Optional[int] = 'AutoProcessor'
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
a : Union[str, Any] = 'AutoTokenizer'
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
a : Tuple = 'AutoFeatureExtractor'
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
a : str = 'AutoTokenizer'
a : str = [processors[t] for t in all_models]
return pd.DataFrame(_A )
def update_pipeline_and_auto_class_table ( _A ):
a : List[Any] = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
a : Optional[Any] = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
a : Optional[int] = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(_A , _A , _A ):
# The type of pipeline may not exist in this framework
if not hasattr(_A , _A ):
continue
# First extract all model_names
a : List[str] = []
for name in getattr(_A , _A ).values():
if isinstance(_A , _A ):
model_names.append(_A )
else:
model_names.extend(list(_A ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def update_metadata ( _A , _A ):
a : Optional[int] = get_frameworks_table()
a : List[str] = Dataset.from_pandas(_A )
a : Optional[Any] = hf_hub_download(
'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=_A )
a : List[Any] = Dataset.from_json(_A )
a : Optional[Any] = {
tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
for i in range(len(_A ) )
}
a : Dict = update_pipeline_and_auto_class_table(_A )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
a : str = sorted(table.keys() )
a : List[Any] = pd.DataFrame(
{
'model_class': model_classes,
'pipeline_tag': [table[m][0] for m in model_classes],
'auto_class': [table[m][1] for m in model_classes],
} )
a : Optional[Any] = Dataset.from_pandas(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(_A , 'frameworks.json' ) )
tags_dataset.to_json(os.path.join(_A , 'pipeline_tags.json' ) )
if commit_sha is not None:
a : int = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
a : Optional[int] = 'Update'
upload_folder(
repo_id='huggingface/transformers-metadata' , folder_path=_A , repo_type='dataset' , token=_A , commit_message=_A , )
def check_pipeline_tags ( ):
a : int = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
a : List[Any] = transformers_module.pipelines.SUPPORTED_TASKS
a : str = []
for key in pipeline_tasks:
if key not in in_table:
a : Dict = pipeline_tasks[key]['pt']
if isinstance(_A , (list, tuple) ):
a : List[Any] = model[0]
a : Union[str, Any] = model.__name__
if model not in in_table.values():
missing.append(_A )
if len(_A ) > 0:
a : List[str] = ', '.join(_A )
raise ValueError(
'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
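In practice the script is invoked from the repository root, mirroring the header comment and the argument parser above:

    python utils/update_metadata.py --check-only
    python utils/update_metadata.py --token <hub-token> --commit_sha <commit-sha>

The first form only verifies that every pipeline tag is covered; the second refreshes and pushes the huggingface/transformers-metadata dataset.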
| 526
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> None:
UpperCAmelCase__ : Any = num_of_nodes
UpperCAmelCase__ : list[list[int]] = []
UpperCAmelCase__ : dict[int, int] = {}
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> None:
self.m_edges.append([u_node, v_node, weight] )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
UpperCAmelCase__ : List[Any] = self.find_component(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> None:
if component_size[u_node] <= component_size[v_node]:
UpperCAmelCase__ : str = v_node
component_size[v_node] += component_size[u_node]
self.set_component(__UpperCamelCase )
elif component_size[u_node] >= component_size[v_node]:
UpperCAmelCase__ : str = self.find_component(__UpperCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(__UpperCamelCase )
def lowerCAmelCase__ ( self )-> None:
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
UpperCAmelCase__ : Tuple = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = edge
UpperCAmelCase__ : List[Any] = self.m_component[u]
UpperCAmelCase__ : Tuple = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
UpperCAmelCase__ : Dict = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = edge
UpperCAmelCase__ : Any = self.m_component[u]
UpperCAmelCase__ : List[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
print(F"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
UpperCAmelCase__ : Union[str, Any] = [-1] * self.m_num_of_nodes
print(F"The total weight of the minimal spanning tree is: {mst_weight}" )
def a__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
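For reference, the same Borůvka idea in a compact standalone form (a sketch with plain names, not the class above): each phase picks the cheapest edge leaving every component, merges the components, and repeats until one component remains.

def boruvka_mst(num_nodes, edges):
    """edges: list of (u, v, weight) tuples; returns the total MST weight."""
    parent = list(range(num_nodes))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]   # path halving
            x = parent[x]
        return x

    mst_weight, components = 0, num_nodes
    while components > 1:
        cheapest = [None] * num_nodes
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                for r in (ru, rv):
                    if cheapest[r] is None or cheapest[r][2] > w:
                        cheapest[r] = (u, v, w)
        merged = False
        for edge in cheapest:
            if edge is None:
                continue
            u, v, w = edge
            ru, rv = find(u), find(v)
            if ru != rv:
                parent[ru] = rv
                mst_weight += w
                components -= 1
                merged = True
        if not merged:          # graph is disconnected; no spanning tree exists
            break
    return mst_weight

assert boruvka_mst(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 4)]) == 6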
| 660
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'table-transformer'
_A = ['past_key_values']
_A = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=1_00 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , )-> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : int = backbone_config.get("model_type" )
UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : int = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = None, None, None
UpperCAmelCase__ : Optional[int] = use_timm_backbone
UpperCAmelCase__ : Dict = backbone_config
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = num_queries
UpperCAmelCase__ : int = d_model
UpperCAmelCase__ : Optional[int] = encoder_ffn_dim
UpperCAmelCase__ : str = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Tuple = decoder_layers
UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : List[Any] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : List[str] = init_xavier_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = auxiliary_loss
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : List[str] = backbone
UpperCAmelCase__ : List[Any] = use_pretrained_backbone
UpperCAmelCase__ : List[str] = dilation
# Hungarian matcher
UpperCAmelCase__ : Dict = class_cost
UpperCAmelCase__ : Any = bbox_cost
UpperCAmelCase__ : Tuple = giou_cost
# Loss coefficients
UpperCAmelCase__ : Any = mask_loss_coefficient
UpperCAmelCase__ : Dict = dice_loss_coefficient
UpperCAmelCase__ : Any = bbox_loss_coefficient
UpperCAmelCase__ : Tuple = giou_loss_coefficient
UpperCAmelCase__ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase__ ( self )-> int:
return self.d_model
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-5
@property
def lowerCAmelCase__ ( self )-> int:
return 12
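For orientation, building the config and reading it back through the attribute map defined above is enough to see how the aliases resolve (a minimal sketch):

from transformers import TableTransformerConfig

cfg = TableTransformerConfig(d_model=128, encoder_layers=4, decoder_layers=4)
print(cfg.model_type)             # "table-transformer"
print(cfg.hidden_size)            # 128, aliased to d_model by the attribute map
print(cfg.num_attention_heads)    # aliased to encoder_attention_heads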
| 660
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase__ = ["note_seq"]
def __init__( self: List[str], *a_: Optional[Any], **a_: Union[str, Any] ):
'''simple docstring'''
requires_backends(self, ["""note_seq"""] )
@classmethod
def UpperCamelCase_ ( cls: str, *a_: List[str], **a_: Any ):
'''simple docstring'''
requires_backends(cls, ["""note_seq"""] )
@classmethod
def UpperCamelCase_ ( cls: Tuple, *a_: List[Any], **a_: Dict ):
'''simple docstring'''
requires_backends(cls, ["""note_seq"""] )
| 609
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["image_processor", "tokenizer"]
lowercase_ = "ChineseCLIPImageProcessor"
lowercase_ = ("BertTokenizer", "BertTokenizerFast")
def __init__(self : Any , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : str) ->Dict:
'''simple docstring'''
lowerCamelCase__: str =None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCAmelCase_ , )
lowerCamelCase__: Tuple =kwargs.pop("feature_extractor")
lowerCamelCase__: Optional[int] =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.image_processor
def __call__(self : int , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : Dict) ->Optional[int]:
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none.")
if text is not None:
encoding =self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_)
if images is not None:
image_features =self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_)
if text is not None and images is not None:
encoding["pixel_values"] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase_) , tensor_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int) ->str:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any]) ->Dict:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_)
@property
def SCREAMING_SNAKE_CASE_ (self : int) ->List[str]:
'''simple docstring'''
lowerCamelCase__: str =self.tokenizer.model_input_names
lowerCamelCase__: Union[str, Any] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->str:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase_ , )
return self.image_processor_class
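A typical round trip through the processor pairs one tokenizer call with one image-processor call, exactly as __call__ above dispatches them (the checkpoint name is illustrative):

from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
inputs = processor(text=["一只猫"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
# tokenizer fields (input_ids, attention_mask, ...) plus pixel_values from the image processor
print(sorted(inputs.keys()))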
| 59
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _A ( __magic_name__):
SCREAMING_SNAKE_CASE : List[Any] = '''vivit'''
def __init__( self , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=[2, 16, 16] , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu_fast" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-06 , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE_ : Dict = hidden_act
SCREAMING_SNAKE_CASE_ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_frames
SCREAMING_SNAKE_CASE_ : Tuple = tubelet_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = qkv_bias
super().__init__(**_SCREAMING_SNAKE_CASE )
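As a quick sanity check on the video-specific fields (a minimal sketch):

from transformers import VivitConfig

cfg = VivitConfig(num_frames=16, tubelet_size=[2, 16, 16], image_size=224)
# tubelet embedding yields roughly (16 / 2) * (224 / 16) ** 2 = 1568 patch tokens per clip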
| 353
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
@dataclass
class _A :
def __init__( self , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=6.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="fp4" , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = load_in_abit
SCREAMING_SNAKE_CASE_ : Tuple = load_in_abit
SCREAMING_SNAKE_CASE_ : Dict = llm_inta_threshold
SCREAMING_SNAKE_CASE_ : Tuple = llm_inta_skip_modules
SCREAMING_SNAKE_CASE_ : Optional[int] = llm_inta_enable_fpaa_cpu_offload
SCREAMING_SNAKE_CASE_ : Optional[Any] = llm_inta_has_fpaa_weight
SCREAMING_SNAKE_CASE_ : List[str] = bnb_abit_quant_type
SCREAMING_SNAKE_CASE_ : int = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
SCREAMING_SNAKE_CASE_ : List[Any] = torch.floataa
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , torch.dtype ):
SCREAMING_SNAKE_CASE_ : Optional[int] = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def UpperCAmelCase ( self ):
"""simple docstring"""
if not isinstance(self.llm_inta_threshold , _SCREAMING_SNAKE_CASE ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , _SCREAMING_SNAKE_CASE ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , _SCREAMING_SNAKE_CASE ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , _SCREAMING_SNAKE_CASE ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , _SCREAMING_SNAKE_CASE ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , _SCREAMING_SNAKE_CASE ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.load_in_abit or self.load_in_abit
def UpperCAmelCase ( self ):
"""simple docstring"""
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def UpperCAmelCase ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = cls(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for key, value in kwargs.items():
if hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
to_remove.append(_SCREAMING_SNAKE_CASE )
for key in to_remove:
kwargs.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if return_unused_kwargs:
return config, kwargs
else:
return config
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as writer:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.to_dict()
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.dumps(_SCREAMING_SNAKE_CASE , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE ) + '\n'
writer.write(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ : str = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self ):
"""simple docstring"""
return f"{self.__class__.__name__} {self.to_json_string()}"
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE = True ):
"""simple docstring"""
if use_diff is True:
SCREAMING_SNAKE_CASE_ : int = self.to_diff_dict()
else:
SCREAMING_SNAKE_CASE_ : List[str] = self.to_dict()
return json.dumps(_SCREAMING_SNAKE_CASE , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE ) + "\n"
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.to_dict()
# get the default config dict
SCREAMING_SNAKE_CASE_ : Optional[Any] = BitsAndBytesConfig().to_dict()
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
SCREAMING_SNAKE_CASE_ : int = value
return serializable_config_dict
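Downstream, the config is normally built once and handed to from_pretrained. The kwargs below use the released transformers parameter names, and actually loading in 4-bit requires a CUDA GPU plus bitsandbytes>=0.39.0, as the validation above enforces (the model name is illustrative):

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model = AutoModelForCausalLM.from_pretrained("gpt2", quantization_config=bnb_config, device_map="auto")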
| 353
| 1
|
def lowerCamelCase__ ( snake_case_ : list[int] , snake_case_ : list[int] , snake_case_ : int ) -> bool:
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(UpperCAmelCase_ ) )
def lowerCamelCase__ ( snake_case_ : list[list[int]] , snake_case_ : int , snake_case_ : list[int] , snake_case_ : int ) -> bool:
if index == len(UpperCAmelCase_ ):
return True
# Recursive Step
for i in range(UpperCAmelCase_ ):
if valid_coloring(graph[index] , UpperCAmelCase_ , UpperCAmelCase_ ):
# Color current vertex
__snake_case = i
# Validate coloring
if util_color(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , index + 1 ):
return True
# Backtrack
__snake_case = -1
return False
def lowerCamelCase__ ( snake_case_ : list[list[int]] , snake_case_ : int ) -> list[int]:
__snake_case = [-1] * len(UpperCAmelCase_ )
if util_color(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , 0 ):
return colored_vertices
return []
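A worked example of the backtracking search above (the function names were lost in this copy; in the upstream version the entry point is conventionally color(graph, max_colors)):

# adjacency matrix of a 4-cycle: two colors suffice
graph = [[0, 1, 0, 1],
         [1, 0, 1, 0],
         [0, 1, 0, 1],
         [1, 0, 1, 0]]
# color(graph, 2) -> [0, 1, 0, 1]
# a triangle [[0, 1, 1], [1, 0, 1], [1, 1, 0]] is not 2-colorable: color(triangle, 2) -> []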
| 592
|
"""simple docstring"""
class RadixNode :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ = "" , SCREAMING_SNAKE_CASE__ = False ) -> None:
# Mapping from the first character of the prefix of the node
A__ = {}
# A node will be a leaf if the tree contains its word
A__ = is_leaf
A__ = prefix
def match ( self , SCREAMING_SNAKE_CASE__ ) -> tuple[str, str, str]:
A__ = 0
for q, w in zip(self.prefix , SCREAMING_SNAKE_CASE__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def insert_many ( self , SCREAMING_SNAKE_CASE__ ) -> None:
for word in words:
self.insert(SCREAMING_SNAKE_CASE__ )
def insert ( self , SCREAMING_SNAKE_CASE__ ) -> None:
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
A__ = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
A__ = RadixNode(prefix=SCREAMING_SNAKE_CASE__ , is_leaf=SCREAMING_SNAKE_CASE__ )
else:
A__ = self.nodes[word[0]]
A__ , A__ , A__ = incoming_node.match(
SCREAMING_SNAKE_CASE__ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(SCREAMING_SNAKE_CASE__ )
# Case 4: The word is greater than or equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
A__ = remaining_prefix
A__ = self.nodes[matching_string[0]]
A__ = RadixNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = aux_node
if remaining_word == "":
A__ = True
else:
self.nodes[matching_string[0]].insert(SCREAMING_SNAKE_CASE__ )
def find ( self , SCREAMING_SNAKE_CASE__ ) -> bool:
A__ = self.nodes.get(word[0] , SCREAMING_SNAKE_CASE__ )
if not incoming_node:
return False
else:
A__ , A__ , A__ = incoming_node.match(
SCREAMING_SNAKE_CASE__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(SCREAMING_SNAKE_CASE__ )
def delete ( self , SCREAMING_SNAKE_CASE__ ) -> bool:
A__ = self.nodes.get(word[0] , SCREAMING_SNAKE_CASE__ )
if not incoming_node:
return False
else:
A__ , A__ , A__ = incoming_node.match(
SCREAMING_SNAKE_CASE__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(SCREAMING_SNAKE_CASE__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
A__ = list(self.nodes.values() )[0]
A__ = merging_node.is_leaf
self.prefix += merging_node.prefix
A__ = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
A__ = False
# If there is 1 edge, we merge it with its child
else:
A__ = list(incoming_node.nodes.values() )[0]
A__ = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
A__ = merging_node.nodes
return True
def print_tree ( self , SCREAMING_SNAKE_CASE__ = 0 ) -> None:
if self.prefix != "":
print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie ( ) -> bool:
"""simple docstring"""
A__ = "banana bananas bandana band apple all beast".split()
A__ = RadixNode()
root.insert_many(UpperCAmelCase_ )
assert all(root.find(UpperCAmelCase_ ) for word in words )
assert not root.find("bandanas" )
assert not root.find("apps" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def _lowerCamelCase ( ) -> None:
"""simple docstring"""
assert test_trie()
def main ( ) -> None:
"""simple docstring"""
A__ = RadixNode()
A__ = "banana bananas bandanas bandana band apple all beast".split()
root.insert_many(UpperCAmelCase_ )
print("Words:", UpperCAmelCase_ )
print("Tree:" )
root.print_tree()
if __name__ == "__main__":
main()
| 104
| 0
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env ( key , default=False ):
'''simple docstring'''
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase : Union[str, Any] = strtobool(lowerCamelCase_ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'If set, {key} must be yes or no.' )
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
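# Example (illustration only, not part of this module): the flag above is toggled from
# the shell, e.g. `RUN_SLOW=yes python -m pytest tests/`; strtobool accepts yes/no,
# true/false and 1/0.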
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[Any] ) -> Optional[Any]:
'''simple docstring'''
return unittest.skip("Test was skipped" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[Any] ) -> List[str]:
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , "test is slow" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Tuple ) -> Union[str, Any]:
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Dict ) -> Any:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int ) -> int:
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Any ) -> Tuple:
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Any ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Any ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[int] ) -> Tuple:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str ) -> Optional[Any]:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[str] ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[Any] ) -> List[str]:
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[Any] ) -> Tuple:
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Any ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Tuple=None , lowerCAmelCase: Optional[Any]=None ) -> Any:
'''simple docstring'''
if test_case is None:
return partial(lowerCamelCase_ , version=lowerCamelCase_ )
return unittest.skipUnless(is_torch_version(">=" , lowerCamelCase_ ) , F'test requires torch version >= {version}' )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[int] ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str ) -> int:
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[str] ) -> Optional[Any]:
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(lowerCamelCase_ )
class a ( unittest.TestCase ):
_lowercase = True
@classmethod
def _UpperCAmelCase ( cls ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = tempfile.mkdtemp()
@classmethod
def _UpperCAmelCase ( cls ):
'''simple docstring'''
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def _UpperCAmelCase ( self ):
'''simple docstring'''
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCAmelCase_ )
class a ( unittest.TestCase ):
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class a ( unittest.TestCase ):
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : Dict = mocks if isinstance(UpperCAmelCase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase : Any = AcceleratorState()
_UpperCAmelCase : List[Any] = tensor[None].clone().to(state.device )
_UpperCAmelCase : List[str] = gather(lowerCamelCase_ ).cpu()
_UpperCAmelCase : int = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , lowerCamelCase_ ):
return False
return True
class a :
def __init__( self , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = returncode
_UpperCAmelCase : Any = stdout
_UpperCAmelCase : Union[str, Any] = stderr
async def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str , lowerCAmelCase: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
while True:
_UpperCAmelCase : Optional[int] = await stream.readline()
if line:
callback(lowerCamelCase_ )
else:
break
async def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Tuple=None , lowerCAmelCase: List[str]=None , lowerCAmelCase: str=None , lowerCAmelCase: List[str]=False , lowerCAmelCase: str=False ) -> _RunOutput:
'''simple docstring'''
if echo:
print("\nRunning: " , " ".join(lowerCamelCase_ ) )
_UpperCAmelCase : Any = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCamelCase_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase_ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : int = []
def tee(lowerCAmelCase: Optional[int] , lowerCAmelCase: Optional[int] , lowerCAmelCase: List[Any] , lowerCAmelCase: Any="" ):
_UpperCAmelCase : Union[str, Any] = line.decode("utf-8" ).rstrip()
sink.append(lowerCamelCase_ )
if not quiet:
print(lowerCamelCase_ , lowerCamelCase_ , file=lowerCamelCase_ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda lowerCAmelCase : tee(lowerCamelCase_ , lowerCamelCase_ , sys.stdout , label="stdout:" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda lowerCAmelCase : tee(lowerCamelCase_ , lowerCamelCase_ , sys.stderr , label="stderr:" ) ) ),
] , timeout=lowerCamelCase_ , )
return _RunOutput(await p.wait() , lowerCamelCase_ , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Union[str, Any]=None , lowerCAmelCase: Any=None , lowerCAmelCase: Union[str, Any]=180 , lowerCAmelCase: List[str]=False , lowerCAmelCase: Union[str, Any]=True ) -> _RunOutput:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = asyncio.get_event_loop()
_UpperCAmelCase : Tuple = loop.run_until_complete(
_stream_subprocess(lowerCamelCase_ , env=lowerCamelCase_ , stdin=lowerCamelCase_ , timeout=lowerCamelCase_ , quiet=lowerCamelCase_ , echo=lowerCamelCase_ ) )
_UpperCAmelCase : Union[str, Any] = ' '.join(lowerCamelCase_ )
if result.returncode > 0:
_UpperCAmelCase : int = '\n'.join(result.stderr )
raise RuntimeError(
F'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
F'The combined stderr from workers follows:\n{stderr}' )
return result
class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """Run `command` with subprocess.check_output and optionally return its decoded
    stdout, raising SubprocessCallException with the captured output on failure."""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
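

# Minimal usage sketch for `run_command` above (the command itself is an arbitrary
# example): it returns decoded stdout when `return_stdout=True` and raises
# SubprocessCallException if the command exits non-zero.
#
#   stdout = run_command(["python", "-c", "print('hello')"], return_stdout=True)
#   assert stdout.strip() == "hello"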
| 706
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
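

# Illustration only (not used by the processor below): a quick sanity check of the two
# helpers above; the shapes, seed and palette size are arbitrary assumptions.
def _color_quantize_demo() -> None:
    rng = np.random.default_rng(0)
    image = rng.integers(0, 256, size=(8, 8, 3)).astype(np.float32)
    palette = rng.integers(0, 256, size=(16, 3)).astype(np.float32)
    ids = color_quantize(image, palette)  # one palette index per pixel, shape (64,)
    print(ids.reshape(8, 8))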
class a ( UpperCAmelCase ):
_lowercase = ["pixel_values"]
def __init__( self , A_ = None , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = True , A_ = True , **A_ , ):
'''simple docstring'''
super().__init__(**A_ )
_UpperCAmelCase : Optional[Any] = size if size is not None else {"height": 256, "width": 256}
_UpperCAmelCase : Optional[int] = get_size_dict(A_ )
_UpperCAmelCase : Union[str, Any] = np.array(A_ ) if clusters is not None else None
_UpperCAmelCase : int = do_resize
_UpperCAmelCase : Union[str, Any] = size
_UpperCAmelCase : Optional[Any] = resample
_UpperCAmelCase : str = do_normalize
_UpperCAmelCase : List[str] = do_color_quantize
def _UpperCAmelCase ( self , A_ , A_ , A_ = PILImageResampling.BILINEAR , A_ = None , **A_ , ):
'''simple docstring'''
_UpperCAmelCase : int = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
A_ , size=(size["height"], size["width"]) , resample=A_ , data_format=A_ , **A_ )
def _UpperCAmelCase ( self , A_ , A_ = None , ):
'''simple docstring'''
_UpperCAmelCase : Dict = rescale(image=A_ , scale=1 / 1_27.5 , data_format=A_ )
_UpperCAmelCase : List[Any] = image - 1
return image
def _UpperCAmelCase ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : Any = size if size is not None else self.size
_UpperCAmelCase : Dict = get_size_dict(A_ )
_UpperCAmelCase : List[Any] = resample if resample is not None else self.resample
_UpperCAmelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : Optional[Any] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
_UpperCAmelCase : Any = clusters if clusters is not None else self.clusters
_UpperCAmelCase : Optional[int] = np.array(A_ )
_UpperCAmelCase : List[str] = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
_UpperCAmelCase : List[str] = [to_numpy_array(A_ ) for image in images]
if do_resize:
_UpperCAmelCase : int = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_normalize:
_UpperCAmelCase : List[str] = [self.normalize(image=A_ ) for image in images]
if do_color_quantize:
_UpperCAmelCase : Tuple = [to_channel_dimension_format(A_ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
_UpperCAmelCase : List[str] = np.array(A_ )
_UpperCAmelCase : List[Any] = color_quantize(A_ , A_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
_UpperCAmelCase : Any = images.shape[0]
_UpperCAmelCase : List[Any] = images.reshape(A_ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
_UpperCAmelCase : Union[str, Any] = list(A_ )
else:
_UpperCAmelCase : Optional[Any] = [to_channel_dimension_format(A_ , A_ ) for image in images]
_UpperCAmelCase : List[Any] = {"input_ids": images}
return BatchFeature(data=A_ , tensor_type=A_ )
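

# Hedged usage sketch for the processor above. `ImageGPTImageProcessor` is the public
# name this class carries in transformers (the class name is mangled in this dump) and
# the checkpoint id is only an example.
#
#   from transformers import ImageGPTImageProcessor
#   processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
#   encoding = processor(images=image, return_tensors="pt")  # -> {"input_ids": tensor}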
| 467
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase: Dict = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: Tuple = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_lowercase: str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 192
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowercase: Any = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: str = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: str = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
_lowercase: Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 192
| 1
|
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
__UpperCAmelCase =False
try:
__UpperCAmelCase =_is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class lowerCAmelCase__ :
def __init__( self , UpperCamelCase__ = None , UpperCamelCase__ = [] ):
'''simple docstring'''
A__ = 0
A__ = choices
A__ = prompt
if sys.platform == "win32":
A__ = "*"
else:
A__ = "➔ "
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = "" ):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , UpperCamelCase__ )
else:
forceWrite(self.choices[index] , UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if index == self.position:
forceWrite(f""" {self.arrow_char} """ )
self.write_choice(UpperCamelCase__ )
else:
forceWrite(f""" {self.choices[index]}""" )
reset_cursor()
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = 1 ):
'''simple docstring'''
A__ = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(UpperCamelCase__ )
move_cursor(UpperCamelCase__ , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def lowercase_ ( self ):
'''simple docstring'''
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def lowercase_ ( self ):
'''simple docstring'''
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def lowercase_ ( self ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def lowercase_ ( self ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(UpperCamelCase__ )] for number in range(10 )] )
def lowercase_ ( self ):
'''simple docstring'''
A__ = int(chr(self.current_selection ) )
A__ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , UpperCamelCase__ )
else:
return
else:
return
def lowercase_ ( self , UpperCamelCase__ = 0 ):
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
A__ = default_choice
for i in range(len(self.choices ) ):
self.print_choice(UpperCamelCase__ )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
A__ = int(builtins.input() )
except ValueError:
A__ = default_choice
else:
A__ = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(UpperCamelCase__ , "\n" )
return choice
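

# Hedged usage sketch for the selection menu above (its public name in accelerate is
# `BulletMenu`; the name is mangled in this dump, so treat it as assumed):
#
#   menu = BulletMenu("Which mixed precision mode?", ["no", "fp16", "bf16"])
#   choice_index = menu.run(default_choice=0)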
| 714
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"""openai/imagegpt-small""": """""",
"""openai/imagegpt-medium""": """""",
"""openai/imagegpt-large""": """""",
}
class lowerCAmelCase__ ( UpperCAmelCase_ ):
lowercase__ : Dict = """imagegpt"""
lowercase__ : str = ["""past_key_values"""]
lowercase__ : Union[str, Any] = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , UpperCamelCase__=5_12 + 1 , UpperCamelCase__=32 * 32 , UpperCamelCase__=5_12 , UpperCamelCase__=24 , UpperCamelCase__=8 , UpperCamelCase__=None , UpperCamelCase__="quick_gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1e-5 , UpperCamelCase__=0.02 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , **UpperCamelCase__ , ):
'''simple docstring'''
A__ = vocab_size
A__ = n_positions
A__ = n_embd
A__ = n_layer
A__ = n_head
A__ = n_inner
A__ = activation_function
A__ = resid_pdrop
A__ = embd_pdrop
A__ = attn_pdrop
A__ = layer_norm_epsilon
A__ = initializer_range
A__ = scale_attn_weights
A__ = use_cache
A__ = scale_attn_by_inverse_layer_idx
A__ = reorder_and_upcast_attn
A__ = tie_word_embeddings
super().__init__(tie_word_embeddings=UpperCamelCase__ , **UpperCamelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
@property
def lowercase_ ( self ):
'''simple docstring'''
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
] )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = 1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = 3 , UpperCamelCase__ = 32 , UpperCamelCase__ = 32 , ):
'''simple docstring'''
A__ = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A__ = dict(preprocessor(images=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return inputs
| 261
| 0
|
import socket


def main() -> None:
    """Connect to a local server and save the bytes it sends into `Received_file`."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
| 458
|
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure a single qubit on the Aer simulator and return the observed counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 458
| 1
|
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , *UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> int:
'''simple docstring'''
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase_ = eval_examples
lowerCamelCase_ = post_process_function
def _lowerCAmelCase ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__ = "eval" ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCamelCase_ = self.get_eval_dataloader(UpperCamelCase__ )
lowerCamelCase_ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
lowerCamelCase_ = time.time()
try:
lowerCamelCase_ = eval_loop(
UpperCamelCase__ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , metric_key_prefix=UpperCamelCase__ , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase__ , UpperCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowerCamelCase_ = self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , output.predictions )
lowerCamelCase_ = self.compute_metrics(UpperCamelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
lowerCamelCase_ = metrics.pop(UpperCamelCase__ )
metrics.update(output.metrics )
else:
lowerCamelCase_ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCamelCase_ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase__ )
return metrics
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__ = "test" ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.get_test_dataloader(UpperCamelCase__ )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
lowerCamelCase_ = time.time()
try:
lowerCamelCase_ = eval_loop(
UpperCamelCase__ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , metric_key_prefix=UpperCamelCase__ , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase__ , UpperCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCamelCase_ = self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , output.predictions , '''predict''' )
lowerCamelCase_ = self.compute_metrics(UpperCamelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
lowerCamelCase_ = metrics.pop(UpperCamelCase__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase__ )
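

# Hedged usage sketch: how a question-answering trainer like the one above is usually
# wired together. Every right-hand-side name (model, datasets, post-processing and
# metric functions) is assumed for illustration and is not defined in this file.
#
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(metric_key_prefix="eval")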
| 66
|
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase :
"""simple docstring"""
@staticmethod
def _lowerCAmelCase ( *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCamelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase_ = image_classifier(UpperCamelCase__ , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(UpperCamelCase__ ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
lowerCamelCase_ = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
] , )
@require_tf
def _lowerCAmelCase ( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCamelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase_ = image_classifier(UpperCamelCase__ , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
lowerCamelCase_ = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
[
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
{'''score''': 0.333, '''label''': ANY(UpperCamelCase__ )},
],
] , )
@slow
@require_torch
def _lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase_ = image_classifier(UpperCamelCase__ , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowerCamelCase_ = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase_ = image_classifier(UpperCamelCase__ , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
lowerCamelCase_ = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
| 66
| 1
|
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def __a(SCREAMING_SNAKE_CASE_ : SplitDict ):
'''simple docstring'''
_lowerCAmelCase = split_dict._to_yaml_list()
assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = SplitDict._from_yaml_list(SCREAMING_SNAKE_CASE_ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_lowerCAmelCase = None
# the split name of split_dict takes over the name of the split info object
_lowerCAmelCase = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info" , [SplitInfo(), SplitInfo(dataset_name=SCREAMING_SNAKE_CASE_ ), SplitInfo(dataset_name="my_dataset" )] )
def __a(SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
_lowerCAmelCase = asdict(SplitDict({"train": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 18
|
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Any = (DDPMParallelScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
def _snake_case ( self ) -> List[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase )
def _snake_case ( self ) -> Any:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCAmelCase )
def _snake_case ( self ) -> Optional[Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _snake_case ( self ) -> Optional[int]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _snake_case ( self ) -> List[str]:
self.check_over_configs(thresholding=_lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , )
def _snake_case ( self ) -> int:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _snake_case ( self ) -> Dict:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def _snake_case ( self ) -> Tuple:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = self.dummy_sample_deter + 0.1
_lowerCAmelCase = self.dummy_sample_deter - 0.1
_lowerCAmelCase = samplea.shape[0]
_lowerCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
_lowerCAmelCase = torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 , _lowerCAmelCase )
_lowerCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_lowerCAmelCase = scheduler.batch_step_no_noise(_lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 1153.1833 ) < 1E-2
assert abs(result_mean.item() - 0.5005 ) < 1E-3
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowerCAmelCase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(prediction_type="v_prediction" )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowerCAmelCase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowerCAmelCase )
_lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_lowerCAmelCase ):
if i == len(_lowerCAmelCase ) - 1:
_lowerCAmelCase = -1
else:
_lowerCAmelCase = timesteps[i + 1]
_lowerCAmelCase = scheduler.previous_timestep(_lowerCAmelCase )
_lowerCAmelCase = prev_t.item()
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> Any:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_lowerCAmelCase , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=_lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
_lowerCAmelCase = len(_lowerCAmelCase )
with self.assertRaises(_lowerCAmelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=_lowerCAmelCase , timesteps=_lowerCAmelCase )
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_lowerCAmelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=_lowerCAmelCase )
| 18
| 1
|
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
__A : List[Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def lowercase ( __snake_case : Dict , __snake_case : tuple , __snake_case : Path , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : List[Any]=False , ):
output_path.parent.mkdir(parents=__snake_case , exist_ok=__snake_case )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
__snake_case , __snake_case , f=output_path.as_posix() , input_names=__snake_case , output_names=__snake_case , dynamic_axes=__snake_case , do_constant_folding=__snake_case , use_external_data_format=__snake_case , enable_onnx_checker=__snake_case , opset_version=__snake_case , )
else:
export(
__snake_case , __snake_case , f=output_path.as_posix() , input_names=__snake_case , output_names=__snake_case , dynamic_axes=__snake_case , do_constant_folding=__snake_case , opset_version=__snake_case , )
@torch.no_grad()
def lowercase ( __snake_case : str , __snake_case : str , __snake_case : int , __snake_case : bool = False ):
lowercase_ : int = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowercase_ : Union[str, Any] = '''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
else:
lowercase_ : List[Any] = '''cpu'''
lowercase_ : Tuple = StableDiffusionPipeline.from_pretrained(__snake_case , torch_dtype=__snake_case ).to(__snake_case )
lowercase_ : Any = Path(__snake_case )
# TEXT ENCODER
lowercase_ : Optional[int] = pipeline.text_encoder.config.max_position_embeddings
lowercase_ : Tuple = pipeline.text_encoder.config.hidden_size
lowercase_ : Union[str, Any] = pipeline.tokenizer(
'''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=__snake_case , return_tensors='''pt''' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=__snake_case , dtype=torch.intaa )) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''sequence'''},
} , opset=__snake_case , )
del pipeline.text_encoder
# UNET
lowercase_ : Dict = pipeline.unet.config.in_channels
lowercase_ : Optional[Any] = pipeline.unet.config.sample_size
lowercase_ : str = output_path / '''unet''' / '''model.onnx'''
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , __snake_case , __snake_case , __snake_case ).to(device=__snake_case , dtype=__snake_case ),
torch.randn(2 ).to(device=__snake_case , dtype=__snake_case ),
torch.randn(2 , __snake_case , __snake_case ).to(device=__snake_case , dtype=__snake_case ),
False,
) , output_path=__snake_case , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''timestep''': {0: '''batch'''},
'''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
} , opset=__snake_case , use_external_data_format=__snake_case , )
lowercase_ : List[str] = str(unet_path.absolute().as_posix() )
lowercase_ : Optional[int] = os.path.dirname(__snake_case )
lowercase_ : Dict = onnx.load(__snake_case )
# clean up existing tensor files
shutil.rmtree(__snake_case )
os.mkdir(__snake_case )
# collate external tensor files into one
onnx.save_model(
__snake_case , __snake_case , save_as_external_data=__snake_case , all_tensors_to_one_file=__snake_case , location='''weights.pb''' , convert_attribute=__snake_case , )
del pipeline.unet
# VAE ENCODER
lowercase_ : Optional[int] = pipeline.vae
lowercase_ : Dict = vae_encoder.config.in_channels
lowercase_ : int = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
lowercase_ : Dict = lambda __snake_case , __snake_case : vae_encoder.encode(__snake_case , __snake_case )[0].sample()
onnx_export(
__snake_case , model_args=(
torch.randn(1 , __snake_case , __snake_case , __snake_case ).to(device=__snake_case , dtype=__snake_case ),
False,
) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=__snake_case , )
# VAE DECODER
lowercase_ : int = pipeline.vae
lowercase_ : Optional[Any] = vae_decoder.config.latent_channels
lowercase_ : int = vae_decoder.config.out_channels
# forward only through the decoder part
lowercase_ : Any = vae_encoder.decode
onnx_export(
__snake_case , model_args=(
torch.randn(1 , __snake_case , __snake_case , __snake_case ).to(device=__snake_case , dtype=__snake_case ),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=__snake_case , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
lowercase_ : Optional[int] = pipeline.safety_checker
lowercase_ : List[str] = safety_checker.config.vision_config.num_channels
lowercase_ : Optional[int] = safety_checker.config.vision_config.image_size
lowercase_ : int = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , __snake_case , __snake_case , __snake_case , ).to(device=__snake_case , dtype=__snake_case ),
torch.randn(1 , __snake_case , __snake_case , __snake_case ).to(device=__snake_case , dtype=__snake_case ),
) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
'''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
} , opset=__snake_case , )
del pipeline.safety_checker
lowercase_ : Any = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''' )
lowercase_ : Dict = pipeline.feature_extractor
else:
lowercase_ : List[Any] = None
lowercase_ : Union[str, Any] = None
lowercase_ : int = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''' ) , scheduler=pipeline.scheduler , safety_checker=__snake_case , feature_extractor=__snake_case , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(__snake_case )
print('''ONNX pipeline saved to''' , __snake_case )
del pipeline
del onnx_pipeline
lowercase_ : int = OnnxStableDiffusionPipeline.from_pretrained(__snake_case , provider='''CPUExecutionProvider''' )
print('''ONNX pipeline is loadable''' )
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
__A : List[str] = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
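# Hedged CLI sketch with placeholder paths (the flags mirror the argparse options above;
# the script file name is an assumption):
#
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./stable_diffusion_onnx \
#       --opset 14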
| 141
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class _UpperCAmelCase ( unittest.TestCase ):
def A ( self : Tuple ) -> Optional[Any]:
lowercase_ : Any = inspect.getfile(accelerate.test_utils )
lowercase_ : int = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
lowercase_ : Union[str, Any] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
lowercase_ : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def A ( self : List[str] ) -> List[str]:
print(F'''Found {torch.cuda.device_count()} devices.''' )
lowercase_ : int = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
@require_multi_gpu
def A ( self : List[Any] ) -> List[Any]:
print(F'''Found {torch.cuda.device_count()} devices.''' )
lowercase_ : List[str] = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
@require_multi_gpu
def A ( self : str ) -> Union[str, Any]:
lowercase_ : Tuple = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
@require_multi_gpu
def A ( self : Optional[int] ) -> Optional[Any]:
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
lowercase_ : Optional[int] = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
__A : List[Any] = Accelerator()
__A : Dict = (accelerator.state.process_index + 2, 10)
__A : Tuple = torch.randint(0, 10, shape).to(accelerator.device)
__A : Optional[Any] = ''''''
__A : Optional[int] = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
__A : int = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
__A : Optional[int] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 141
| 1
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq/fairseq OPT checkpoint and normalise its state-dict keys."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v

            del sd[key]

    return sd
@torch.no_grad()
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case=None ):
__lowerCAmelCase = load_checkpoint(__lowerCAmelCase )
if config is not None:
__lowerCAmelCase = OPTConfig.from_pretrained(__lowerCAmelCase )
else:
__lowerCAmelCase = OPTConfig()
__lowerCAmelCase = OPTModel(__lowerCAmelCase ).half().eval()
model.load_state_dict(__lowerCAmelCase )
# Check results
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
lowerCamelCase : Any = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
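# Illustrative invocation of the conversion script above (the script name and all paths
# below are placeholders, not values taken from the original source):
#
#   python convert_opt_original_checkpoint_to_pytorch.py \
#       --fairseq_path /path/to/opt/restored.pt \
#       --pytorch_dump_folder_path ./opt-converted \
#       --hf_config facebook/opt-350m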
| 367
|
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
):
    """Grid search guided by a heuristic; returns the path and the action map."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if 0 <= x2 < len(grid) and 0 <= y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
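# Quick sanity check of the Manhattan-distance heuristic built above (illustrative):
# with goal = [4, 5], cell (0, 0) gets heuristic |0 - 4| + |0 - 5| = 9, while obstacle
# cells are bumped to 99 so the search strongly prefers to route around them.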
| 290
| 0
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection):
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
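# Illustrative behaviour of `patience_sort` defined above:
#
#   >>> patience_sort([1, 9, 5, 21, 17, 6])
#   [1, 5, 6, 9, 17, 21]
#   >>> patience_sort([])
#   []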
| 702
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 423
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = KandinskyVaaControlnetImgaImgPipeline
SCREAMING_SNAKE_CASE_ = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
SCREAMING_SNAKE_CASE_ = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
SCREAMING_SNAKE_CASE_ = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
SCREAMING_SNAKE_CASE_ = False
@property
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
return 32
@property
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
return 32
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
return 100
@property
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase_ = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_ )
return model
@property
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.dummy_unet
lowerCamelCase_ = self.dummy_movq
lowerCamelCase_ = {
'num_train_timesteps': 1000,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase_ = DDIMScheduler(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE_ )
# create init_image
lowerCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert('RGB' ).resize((256, 256) )
# create hint
lowerCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
lowerCamelCase_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = 'cpu'
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = output.images
lowerCamelCase_ = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase_ = init_image.resize((512, 512) )
lowerCamelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase_ = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE_ ) ).float() / 255.0
lowerCamelCase_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowerCamelCase_ = 'A robot, 4k photo'
lowerCamelCase_ = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
lowerCamelCase_ = pipeline.to(SCREAMING_SNAKE_CASE_ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase_ ,lowerCamelCase_ = pipe_prior(
SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , strength=0.85 , generator=SCREAMING_SNAKE_CASE_ , negative_prompt='' , ).to_tuple()
lowerCamelCase_ = pipeline(
image=SCREAMING_SNAKE_CASE_ , image_embeds=SCREAMING_SNAKE_CASE_ , negative_image_embeds=SCREAMING_SNAKE_CASE_ , hint=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='np' , )
lowerCamelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 42
|
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f'''{solution() = }''')
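# Worked example of the concatenated-product pattern searched above (illustrative):
# for base_num = 9327, 100002 * 9327 = 932718654, which is the concatenation of
# 9327 and 2 * 9327 = 18654 and uses each digit 1-9 exactly once, so it is 9-pandigital.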
| 42
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 148
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 148
| 1
|
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCAmelCase_ ( snake_case ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> List[Any]:
super().__init__(
UpperCamelCase_ , split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
__lowercase : List[Any] = field
__lowercase : List[str] = path_or_paths if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else {self.split: path_or_paths}
__lowercase : Dict = Json(
cache_dir=UpperCamelCase_ , data_files=UpperCamelCase_ , features=UpperCamelCase_ , field=UpperCamelCase_ , **UpperCamelCase_ , )
def _lowerCamelCase ( self ) -> str:
# Build iterable dataset
if self.streaming:
__lowercase : Any = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowercase : List[Any] = None
__lowercase : Any = None
__lowercase : Union[str, Any] = None
__lowercase : Optional[int] = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
__lowercase : str = self.builder.as_dataset(
split=self.split , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
return dataset
class UpperCAmelCase_ :
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> str:
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
__lowercase : Any = dataset
__lowercase : Dict = path_or_buf
__lowercase : Any = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__lowercase : List[str] = num_proc
__lowercase : Optional[Any] = '''utf-8'''
__lowercase : Tuple = to_json_kwargs
def _lowerCamelCase ( self ) -> int:
__lowercase : str = self.to_json_kwargs.pop('''path_or_buf''' , UpperCamelCase_ )
__lowercase : str = self.to_json_kwargs.pop('''orient''' , '''records''' )
__lowercase : List[Any] = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
__lowercase : Any = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
__lowercase : Tuple = self.to_json_kwargs.pop('''compression''' , UpperCamelCase_ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=UpperCamelCase_ ) as buffer:
__lowercase : List[str] = self._write(file_obj=UpperCamelCase_ , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
''' was passed. Please provide a local path instead.''' )
__lowercase : Tuple = self._write(
file_obj=self.path_or_buf , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **self.to_json_kwargs )
return written
def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]:
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase : Dict = args
__lowercase : Union[str, Any] = query_table(
table=self.dataset.data , key=slice(UpperCamelCase_ , offset + self.batch_size ) , indices=self.dataset._indices , )
__lowercase : Optional[Any] = batch.to_pandas().to_json(
path_or_buf=UpperCamelCase_ , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **UpperCamelCase_ )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ , ) -> int:
__lowercase : Dict = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
__lowercase : str = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(UpperCamelCase_ )
else:
__lowercase ,__lowercase : Dict = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , UpperCamelCase_ , UpperCamelCase_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(UpperCamelCase_ )
return written
| 76
|
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
def __init__( self : List[str] , snake_case : int , snake_case : Optional[int]=1_3 , snake_case : List[str]=3_0 , snake_case : Optional[Any]=2 , snake_case : Union[str, Any]=3 , snake_case : List[Any]=True , snake_case : Union[str, Any]=True , snake_case : List[Any]=3_2 , snake_case : int=5 , snake_case : int=4 , snake_case : List[str]=3_7 , snake_case : Union[str, Any]="gelu" , snake_case : int=0.1 , snake_case : Dict=0.1 , snake_case : Any=1_0 , snake_case : Any=0.02 , snake_case : int=3 , snake_case : int=0.6 , snake_case : str=None , ) -> Any:
"""simple docstring"""
UpperCamelCase_ : List[Any] = parent
UpperCamelCase_ : Optional[int] = batch_size
UpperCamelCase_ : Optional[Any] = image_size
UpperCamelCase_ : Optional[int] = patch_size
UpperCamelCase_ : List[str] = num_channels
UpperCamelCase_ : Optional[int] = is_training
UpperCamelCase_ : Tuple = use_labels
UpperCamelCase_ : str = hidden_size
UpperCamelCase_ : Union[str, Any] = num_hidden_layers
UpperCamelCase_ : int = num_attention_heads
UpperCamelCase_ : Optional[Any] = intermediate_size
UpperCamelCase_ : Optional[int] = hidden_act
UpperCamelCase_ : int = hidden_dropout_prob
UpperCamelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase_ : List[str] = type_sequence_label_size
UpperCamelCase_ : List[str] = initializer_range
UpperCamelCase_ : Union[str, Any] = mask_ratio
UpperCamelCase_ : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase_ : int = (image_size // patch_size) ** 2
UpperCamelCase_ : Any = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ : Tuple = None
if self.use_labels:
UpperCamelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ : List[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : Dict , snake_case : Optional[int] , snake_case : str ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = ViTMAEModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase_ : Any = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : List[Any] , snake_case : Tuple , snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : List[str] = ViTMAEForPreTraining(snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase_ : Optional[int] = model(snake_case )
UpperCamelCase_ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCamelCase_ : str = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase_ : List[str] = 1
UpperCamelCase_ : Tuple = ViTMAEForPreTraining(snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase_ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_ : Optional[Any] = model(snake_case )
UpperCamelCase_ : Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_ : Union[str, Any] = config_and_inputs
UpperCamelCase_ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( snake_case_ , snake_case_ , unittest.TestCase ):
lowercase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowercase = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Tuple = ViTMAEModelTester(self )
UpperCamelCase_ : List[str] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> int:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : List[str] = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Optional[Any] = model_class(snake_case )
UpperCamelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ : Optional[int] = [*signature.parameters.keys()]
UpperCamelCase_ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : Tuple ) -> int:
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase_ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCamelCase_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase_ : Optional[Any] = torch.from_numpy(snake_case )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase_ : Optional[int] = pt_noise
super().check_pt_tf_models(snake_case , snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : str = model_class(snake_case )
model.to(snake_case )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase_ : Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
UpperCamelCase_ : Any = outputs[0].cpu().numpy()
UpperCamelCase_ : Optional[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case )
UpperCamelCase_ : Any = model_class.from_pretrained(snake_case )
model.to(snake_case )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase_ : Optional[Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
# Make sure we don't have nans
UpperCamelCase_ : int = after_outputs[0].cpu().numpy()
UpperCamelCase_ : Union[str, Any] = 0
UpperCamelCase_ : Optional[int] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case , 1e-5 )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ : Tuple = ViTMAEModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def __lowercase ( ):
UpperCamelCase_ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase_ : Dict = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(snake_case )
UpperCamelCase_ : str = self.default_image_processor
UpperCamelCase_ : int = prepare_img()
UpperCamelCase_ : Optional[int] = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase_ : Optional[Any] = ViTMAEConfig()
UpperCamelCase_ : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase_ : Any = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCamelCase_ : Tuple = model(**snake_case , noise=torch.from_numpy(snake_case ).to(device=snake_case ) )
# verify the logits
UpperCamelCase_ : Optional[int] = torch.Size((1, 1_9_6, 7_6_8) )
self.assertEqual(outputs.logits.shape , snake_case )
UpperCamelCase_ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(snake_case ) , atol=1e-4 ) )
| 417
| 0
|
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_a : Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def a__ ( a : np.ndarray , a : float , a : int = 16_000 ):
"""simple docstring"""
_snake_case : List[str] = int(round(sample_rate * max_length ) )
if len(a ) <= sample_length:
return wav
_snake_case : str = randint(0 , len(a ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class _UpperCAmelCase :
__lowercase : Optional[str] = field(default=_snake_case , metadata={"""help""": """Name of a dataset from the datasets package"""})
__lowercase : Optional[str] = field(
default=_snake_case , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""})
__lowercase : Optional[str] = field(
default=_snake_case , metadata={"""help""": """A file containing the training audio paths and labels."""})
__lowercase : Optional[str] = field(
default=_snake_case , metadata={"""help""": """A file containing the validation audio paths and labels."""})
__lowercase : str = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
__lowercase : str = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
__lowercase : str = field(
default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , )
__lowercase : str = field(
default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""})
__lowercase : Optional[int] = field(
default=_snake_case , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__lowercase : Optional[int] = field(
default=_snake_case , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
__lowercase : float = field(
default=2_0 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , )
@dataclass
class _UpperCAmelCase :
__lowercase : str = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
__lowercase : Optional[str] = field(
default=_snake_case , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
__lowercase : Optional[str] = field(
default=_snake_case , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""})
__lowercase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__lowercase : Optional[str] = field(
default=_snake_case , metadata={"""help""": """Name or path of preprocessor config."""})
__lowercase : bool = field(
default=_snake_case , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""})
__lowercase : bool = field(
default=_snake_case , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""})
__lowercase : bool = field(
default=_snake_case , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
__lowercase : Optional[bool] = field(
default=_snake_case , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""})
__lowercase : bool = field(
default=_snake_case , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def lowerCamelCase__ ( self ):
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`." , snake_case_ , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def a__ ( ):
"""simple docstring"""
_snake_case : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_snake_case , _snake_case , _snake_case : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_snake_case , _snake_case , _snake_case : Optional[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , a , a )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_snake_case : Dict = training_args.get_process_log_level()
logger.setLevel(a )
transformers.utils.logging.set_verbosity(a )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_snake_case : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_snake_case : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
_snake_case : List[str] = DatasetDict()
_snake_case : Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
f'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_snake_case : Optional[Any] = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_snake_case : Optional[int] = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_snake_case : int = feature_extractor.model_input_names[0]
def train_transforms(a : int ):
_snake_case : Any = []
for audio in batch[data_args.audio_column_name]:
_snake_case : Optional[int] = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(a )
_snake_case : Dict = feature_extractor(a , sampling_rate=feature_extractor.sampling_rate )
_snake_case : Dict = {model_input_name: inputs.get(a )}
_snake_case : Dict = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(a : Optional[int] ):
_snake_case : List[Any] = [audio["array"] for audio in batch[data_args.audio_column_name]]
_snake_case : Dict = feature_extractor(a , sampling_rate=feature_extractor.sampling_rate )
_snake_case : int = {model_input_name: inputs.get(a )}
_snake_case : Any = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_snake_case : Optional[int] = raw_datasets["train"].features[data_args.label_column_name].names
_snake_case , _snake_case : int = {}, {}
for i, label in enumerate(a ):
_snake_case : int = str(a )
_snake_case : int = label
# Load the accuracy metric from the datasets package
_snake_case : int = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(a : List[str] ):
_snake_case : List[str] = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=a , references=eval_pred.label_ids )
_snake_case : List[str] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(a ) , labelaid=a , idalabel=a , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case : List[str] = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_snake_case : List[Any] = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(a , output_all_columns=a )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_snake_case : Union[str, Any] = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(a , output_all_columns=a )
# Initialize our trainer
_snake_case : Tuple = Trainer(
model=a , args=a , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=a , tokenizer=a , )
# Training
if training_args.do_train:
_snake_case : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
_snake_case : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_snake_case : List[str] = last_checkpoint
_snake_case : Union[str, Any] = trainer.train(resume_from_checkpoint=a )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_snake_case : Optional[int] = trainer.evaluate()
trainer.log_metrics("eval" , a )
trainer.save_metrics("eval" , a )
# Write model card and (optionally) push to hub
_snake_case : Any = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**a )
else:
trainer.create_model_card(**a )
if __name__ == "__main__":
main()
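# Illustrative launch command for the training script above (the model and dataset names
# are examples, not requirements of the script):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-keyword-spotting \
#       --do_train --do_eval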
| 87
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def a__ ( a : Namespace ):
"""simple docstring"""
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_a : int = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class _UpperCAmelCase ( _snake_case):
@staticmethod
def lowerCamelCase__ ( snake_case_ ):
_snake_case : Dict = parser.add_parser(
"convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
train_parser.add_argument("--model_type" , type=snake_case_ , required=snake_case_ , help="Model's type." )
train_parser.add_argument(
"--tf_checkpoint" , type=snake_case_ , required=snake_case_ , help="TensorFlow checkpoint path or folder." )
train_parser.add_argument(
"--pytorch_dump_output" , type=snake_case_ , required=snake_case_ , help="Path to the PyTorch saved model output." )
train_parser.add_argument("--config" , type=snake_case_ , default="" , help="Configuration file path or folder." )
train_parser.add_argument(
"--finetuning_task_name" , type=snake_case_ , default=snake_case_ , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
train_parser.set_defaults(func=snake_case_ )
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ , ):
_snake_case : str = logging.get_logger("transformers-cli/converting" )
self._logger.info(F'Loading model {model_type}' )
_snake_case : Optional[int] = model_type
_snake_case : Any = tf_checkpoint
_snake_case : Optional[int] = pytorch_dump_output
_snake_case : Tuple = config
_snake_case : Tuple = finetuning_task_name
def lowerCamelCase__ ( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(snake_case_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_ )
if "ckpt" in self._tf_checkpoint.lower():
_snake_case : int = self._tf_checkpoint
_snake_case : Optional[Any] = ""
else:
_snake_case : Optional[int] = self._tf_checkpoint
_snake_case : List[str] = ""
convert_transfo_xl_checkpoint_to_pytorch(
snake_case_ , self._config , self._pytorch_dump_output , snake_case_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 87
| 1
|
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 553
|
'''simple docstring'''
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
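# Added usage sketch (not in the original file): there is exactly one way to write 13
# as a sum of distinct squares of natural numbers, namely 2**2 + 3**2.
assert solve(13, 2) == 1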
| 288
| 0
|
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-element combination of the integers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for combination in total_list:
        print(*combination)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
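# Added usage sketch (not in the original file): the recursive generator above yields
# every k-element combination of 1..n in lexicographic order.
assert generate_all_combinations(3, 2) == [[1, 2], [1, 3], [2, 3]]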
| 700
|
def is_palindrome(head) -> bool:
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head) -> bool:
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
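# The three checks above assume a singly linked list node exposing `val` and `next`;
# the snippet never defines one, so the class and helper below are an added sketch
# for illustration only.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


def build_list(values):
    """Build a linked list from a Python list and return its head."""
    head = None
    for value in reversed(values):
        head = ListNode(value, head)
    return head


assert is_palindrome(build_list([1, 2, 2, 1]))
assert not is_palindrome_stack(build_list([1, 2, 3]))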
| 286
| 0
|
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class lowerCAmelCase_ :
def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : int ):
raise NotImplementedError()
def __snake_case ( self : Union[str, Any] ):
raise NotImplementedError()
class lowerCAmelCase_ ( snake_case__ ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : "AutoTokenizer" , SCREAMING_SNAKE_CASE_ : bool = False , **SCREAMING_SNAKE_CASE_ : List[Any] ):
lowerCAmelCase__ = tokenizer
lowerCAmelCase__ = skip_prompt
lowerCAmelCase__ = decode_kwargs
# variables used in the streaming process
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
lowerCAmelCase__ = True
def __snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : List[str] ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('''TextStreamer only supports batch size 1''' )
elif len(value.shape ) > 1:
lowerCAmelCase__ = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
lowerCAmelCase__ = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
lowerCAmelCase__ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('''\n''' ):
lowerCAmelCase__ = text[self.print_len :]
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
# If the last token is a CJK character, we print the characters.
elif len(SCREAMING_SNAKE_CASE_ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
lowerCAmelCase__ = text[self.print_len :]
self.print_len += len(SCREAMING_SNAKE_CASE_ )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
lowerCAmelCase__ = text[self.print_len : text.rfind(''' ''' ) + 1]
self.print_len += len(SCREAMING_SNAKE_CASE_ )
self.on_finalized_text(SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[Any] ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
lowerCAmelCase__ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
lowerCAmelCase__ = text[self.print_len :]
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
else:
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = True
self.on_finalized_text(SCREAMING_SNAKE_CASE_ , stream_end=SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ):
print(SCREAMING_SNAKE_CASE_ , flush=SCREAMING_SNAKE_CASE_ , end='''''' if not stream_end else None )
def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
class lowerCAmelCase_ ( snake_case__ ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : "AutoTokenizer" , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[float] = None , **SCREAMING_SNAKE_CASE_ : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = Queue()
lowerCAmelCase__ = None
lowerCAmelCase__ = timeout
def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ):
self.text_queue.put(SCREAMING_SNAKE_CASE_ , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self : Optional[int] ):
return self
def __snake_case ( self : int ):
lowerCAmelCase__ = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
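# Added usage sketch (not part of the module): the iterator streamer above is meant to
# be passed to `generate`, which runs in a background thread while the caller consumes
# decoded text as it becomes available. The "gpt2" checkpoint and prompt are
# illustrative assumptions only.
if __name__ == "__main__":
    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tok = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
    streamer = TextIteratorStreamer(tok)
    generation_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=20)
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    for new_text in streamer:
        print(new_text, end="")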
| 668
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, rounded to `digit_amount` places when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 668
| 1
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = mock.Mock()
_lowerCAmelCase : Optional[int] = 500
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = HTTPError
_lowerCAmelCase : List[Any] = {}
# Download this model to make sure it's in the cache.
_lowerCAmelCase : Union[str, Any] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=_lowercase ) as mock_head:
_lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = mock.Mock()
_lowerCAmelCase : Any = 500
_lowerCAmelCase : Optional[int] = {}
_lowerCAmelCase : Any = HTTPError
_lowerCAmelCase : Tuple = {}
# Download this model to make sure it's in the cache.
_lowerCAmelCase : Any = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=_lowercase ) as mock_head:
_lowerCAmelCase : Tuple = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def __lowerCamelCase ( self ):
'''simple docstring'''
try:
_lowerCAmelCase : Dict = tempfile.mktemp()
with open(_lowercase ,'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,_lowercase )
_lowerCAmelCase : Union[str, Any] = AlbertTokenizer.from_pretrained(_lowercase )
finally:
os.remove(_lowercase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' ,'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,_lowercase )
_lowerCAmelCase : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class __UpperCamelCase ( unittest.TestCase ):
_UpperCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __lowerCamelCase ( cls ):
'''simple docstring'''
_lowerCAmelCase : Any = TOKEN
HfFolder.save_token(_lowercase )
@classmethod
def __lowerCamelCase ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Dict = os.path.join(_lowercase ,'vocab.txt' )
with open(_lowercase ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCAmelCase : Any = BertTokenizer(_lowercase )
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token )
_lowerCAmelCase : List[str] = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowercase ,repo_id='test-tokenizer' ,push_to_hub=_lowercase ,use_auth_token=self._token )
_lowerCAmelCase : Optional[Any] = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def __lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Optional[Any] = os.path.join(_lowercase ,'vocab.txt' )
with open(_lowercase ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCAmelCase : List[Any] = BertTokenizer(_lowercase )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token )
_lowerCAmelCase : Dict = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_lowercase ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=_lowercase ,use_auth_token=self._token )
_lowerCAmelCase : List[Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def __lowerCamelCase ( self ):
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Any = os.path.join(_lowercase ,'vocab.txt' )
with open(_lowercase ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCAmelCase : Tuple = CustomTokenizer(_lowercase )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
_lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" ,trust_remote_code=_lowercase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Dict = os.path.join(_lowercase ,'vocab.txt' )
with open(_lowercase ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCAmelCase : Any = BertTokenizerFast.from_pretrained(_lowercase )
bert_tokenizer.save_pretrained(_lowercase )
_lowerCAmelCase : Any = CustomTokenizerFast.from_pretrained(_lowercase )
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" ,trust_remote_code=_lowercase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast' )
_lowerCAmelCase : Any = AutoTokenizer.from_pretrained(
F"""{USER}/test-dynamic-tokenizer""" ,use_fast=_lowercase ,trust_remote_code=_lowercase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS]', ' This is a ', 'extra_id_100'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) ,['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) ,['BC', 'A'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) ,['AB', 'C'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) ,['ABC', 'D'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = Trie()
_lowerCAmelCase : List[Any] = trie.cut_text('ABC' ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(_lowercase ,['AB', 'C'] )
| 702
|
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.act(self.wi_a(_A ) )
_lowerCAmelCase : Optional[int] = self.wi_a(_A )
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
| 16
| 0
|
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the Project Euler 43 substring-divisibility property of a digit tuple."""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-to-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 619
|
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Dict = CodeGenTokenizer
snake_case : Dict = CodeGenTokenizerFast
snake_case : Tuple = True
snake_case : Optional[int] = {"""add_prefix_space""": True}
snake_case : int = False
def _lowerCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
UpperCamelCase__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
UpperCamelCase__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
UpperCamelCase__ = {"""unk_token""": """<unk>"""}
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__lowerCAmelCase ) )
def _lowerCamelCase ( self , **__lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _lowerCamelCase ( self , **__lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = """lower newer"""
UpperCamelCase__ = """lower newer"""
return input_text, output_text
def _lowerCamelCase ( self ):
UpperCamelCase__ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase__ = """lower newer"""
UpperCamelCase__ = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
UpperCamelCase__ = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def _lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase )
UpperCamelCase__ = """lower newer"""
# Testing tokenization
UpperCamelCase__ = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
UpperCamelCase__ = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids without special tokens
UpperCamelCase__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
UpperCamelCase__ = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids with special tokens
UpperCamelCase__ = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase )
UpperCamelCase__ = tokenizer.encode(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
UpperCamelCase__ = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing the unknown token
UpperCamelCase__ = tokens + [rust_tokenizer.unk_token]
UpperCamelCase__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def _lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def _lowerCamelCase ( self , __lowerCAmelCase=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
# Simple input
UpperCamelCase__ = """This is a simple input"""
UpperCamelCase__ = ["""This is a simple input 1""", """This is a simple input 2"""]
UpperCamelCase__ = ("""This is a simple input""", """This is a pair""")
UpperCamelCase__ = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" , )
def _lowerCamelCase ( self ):
UpperCamelCase__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
UpperCamelCase__ = """This is a simple input"""
UpperCamelCase__ = ["""This is a simple input looooooooong""", """This is a simple input"""]
UpperCamelCase__ = ("""This is a simple input""", """This is a pair""")
UpperCamelCase__ = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
UpperCamelCase__ = tokenizer.pad_token_id
UpperCamelCase__ = tokenizer(__lowerCAmelCase , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
UpperCamelCase__ = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors="""np""" )
UpperCamelCase__ = tokenizer(*__lowerCAmelCase , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
UpperCamelCase__ = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def _lowerCamelCase ( self ):
UpperCamelCase__ = """$$$"""
UpperCamelCase__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__lowerCAmelCase , add_bos_token=__lowerCAmelCase )
UpperCamelCase__ = """This is a simple input"""
UpperCamelCase__ = ["""This is a simple input 1""", """This is a simple input 2"""]
UpperCamelCase__ = tokenizer.bos_token_id
UpperCamelCase__ = tokenizer(__lowerCAmelCase )
UpperCamelCase__ = tokenizer(__lowerCAmelCase )
self.assertEqual(out_s.input_ids[0] , __lowerCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
UpperCamelCase__ = tokenizer.decode(out_s.input_ids )
UpperCamelCase__ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __lowerCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def _lowerCamelCase ( self ):
UpperCamelCase__ = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
UpperCamelCase__ = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
UpperCamelCase__ = """\nif len_a > len_b: result = a\nelse: result = b"""
UpperCamelCase__ = tokenizer.encode(__lowerCAmelCase )
UpperCamelCase__ = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
UpperCamelCase__ = tokenizer.decode(__lowerCAmelCase , truncate_before_pattern=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
pass
| 619
| 1
|
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Sort a list of numbers by distributing them into `max - min + 1` buckets."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 705
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__lowerCAmelCase ={
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
__lowerCAmelCase ={
"gpt-neox-20b": 2048,
}
class __magic_name__ ( _a):
_UpperCAmelCase : int = VOCAB_FILES_NAMES
_UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Tuple = ['input_ids', 'attention_mask']
def __init__( self : Optional[int] ,__SCREAMING_SNAKE_CASE : str=None ,__SCREAMING_SNAKE_CASE : Union[str, Any]=None ,__SCREAMING_SNAKE_CASE : List[str]=None ,__SCREAMING_SNAKE_CASE : List[Any]="<|endoftext|>" ,__SCREAMING_SNAKE_CASE : Any="<|endoftext|>" ,__SCREAMING_SNAKE_CASE : Dict="<|endoftext|>" ,__SCREAMING_SNAKE_CASE : Union[str, Any]=False ,**__SCREAMING_SNAKE_CASE : Tuple ,):
super().__init__(
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,tokenizer_file=__SCREAMING_SNAKE_CASE ,unk_token=__SCREAMING_SNAKE_CASE ,bos_token=__SCREAMING_SNAKE_CASE ,eos_token=__SCREAMING_SNAKE_CASE ,add_prefix_space=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ,)
UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" ,__SCREAMING_SNAKE_CASE ) != add_prefix_space:
UpperCAmelCase = getattr(__SCREAMING_SNAKE_CASE ,pre_tok_state.pop("type" ) )
UpperCAmelCase = add_prefix_space
UpperCAmelCase = pre_tok_class(**__SCREAMING_SNAKE_CASE )
UpperCAmelCase = add_prefix_space
def _UpperCAmelCase ( self : int ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : Optional[str] = None ):
UpperCAmelCase = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE ,name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : str ,__SCREAMING_SNAKE_CASE : "Conversation" ):
UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__SCREAMING_SNAKE_CASE ,add_special_tokens=__SCREAMING_SNAKE_CASE ) + [self.eos_token_id] )
if len(__SCREAMING_SNAKE_CASE ) > self.model_max_length:
UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
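# Added usage sketch (assuming the class above corresponds to transformers'
# GPTNeoXTokenizerFast and the checkpoint from the pretrained map is available):
#
#     from transformers import GPTNeoXTokenizerFast
#     tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     ids = tokenizer("Hello world").input_ids
#     text = tokenizer.decode(ids)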
| 405
| 0
|
"""simple docstring"""
lowerCamelCase_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def __lowerCamelCase ( a_ : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(a_ , a_ ):
__SCREAMING_SNAKE_CASE :Optional[Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(a_ )
__SCREAMING_SNAKE_CASE :Optional[Any] = ''''''.join(bin(a_ )[2:].zfill(8 ) for byte in data )
__SCREAMING_SNAKE_CASE :Optional[int] = len(a_ ) % 6 != 0
if padding_needed:
# The padding that will be added later
__SCREAMING_SNAKE_CASE :Tuple = b'''=''' * ((6 - len(a_ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(a_ ) % 6)
else:
__SCREAMING_SNAKE_CASE :Optional[int] = b''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(a_ ) , 6 ) ).encode()
+ padding
)
def __lowerCamelCase ( a_ : str ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(a_ , a_ ) and not isinstance(a_ , a_ ):
__SCREAMING_SNAKE_CASE :Tuple = (
'''argument should be a bytes-like object or ASCII string, '''
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(a_ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(a_ , a_ ):
try:
__SCREAMING_SNAKE_CASE :Tuple = encoded_data.decode('''utf-8''' )
except UnicodeDecodeError:
raise ValueError('''base64 encoded data should only contain ASCII characters''' )
__SCREAMING_SNAKE_CASE :List[str] = encoded_data.count('''=''' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(a_ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__SCREAMING_SNAKE_CASE :Tuple = encoded_data[:-padding]
__SCREAMING_SNAKE_CASE :List[str] = ''''''.join(
bin(B64_CHARSET.index(a_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__SCREAMING_SNAKE_CASE :int = ''''''.join(
bin(B64_CHARSET.index(a_ ) )[2:].zfill(6 ) for char in encoded_data )
__SCREAMING_SNAKE_CASE :Dict = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(a_ ) , 8 )
]
return bytes(a_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
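# Added round-trip check (not in the original file); "SGVsbG8=" is the standard Base64
# encoding of b"Hello".
assert base64_encode(b"Hello") == b"SGVsbG8="
assert base64_decode("SGVsbG8=") == b"Hello"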
| 498
|
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCamelCase_ = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def __lowerCamelCase ( a_ : Any , a_ : Union[str, Any]=False ) -> int:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = create_model(
'''HTSAT-tiny''' , '''roberta''' , a_ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=a_ , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
def __lowerCamelCase ( a_ : Optional[int] ) -> int:
__SCREAMING_SNAKE_CASE :Dict = {}
__SCREAMING_SNAKE_CASE :Any = r'''.*sequential.(\d+).*'''
__SCREAMING_SNAKE_CASE :Dict = r'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__SCREAMING_SNAKE_CASE :str = key.replace(a_ , a_ )
if re.match(a_ , a_ ):
# replace sequential layers with list
__SCREAMING_SNAKE_CASE :Tuple = re.match(a_ , a_ ).group(1 )
__SCREAMING_SNAKE_CASE :str = key.replace(f'''sequential.{sequential_layer}.''' , f'''layers.{int(a_ )//3}.linear.''' )
elif re.match(a_ , a_ ):
__SCREAMING_SNAKE_CASE :Union[str, Any] = int(re.match(a_ , a_ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
__SCREAMING_SNAKE_CASE :Union[str, Any] = 1 if projecton_layer == 0 else 2
__SCREAMING_SNAKE_CASE :Tuple = key.replace(f'''_projection.{projecton_layer}.''' , f'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
__SCREAMING_SNAKE_CASE :Union[str, Any] = value
__SCREAMING_SNAKE_CASE :Optional[Any] = mixed_qkv.size(0 ) // 3
__SCREAMING_SNAKE_CASE :Optional[Any] = mixed_qkv[:qkv_dim]
__SCREAMING_SNAKE_CASE :Optional[Any] = mixed_qkv[qkv_dim : qkv_dim * 2]
__SCREAMING_SNAKE_CASE :str = mixed_qkv[qkv_dim * 2 :]
__SCREAMING_SNAKE_CASE :Dict = query_layer
__SCREAMING_SNAKE_CASE :Tuple = key_layer
__SCREAMING_SNAKE_CASE :str = value_layer
else:
__SCREAMING_SNAKE_CASE :Optional[Any] = value
return model_state_dict
def __lowerCamelCase ( a_ : Optional[int] , a_ : Dict , a_ : Dict , a_ : List[Any]=False ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[int] = init_clap(a_ , enable_fusion=a_ )
clap_model.eval()
__SCREAMING_SNAKE_CASE :Optional[Any] = clap_model.state_dict()
__SCREAMING_SNAKE_CASE :Tuple = rename_state_dict(a_ )
__SCREAMING_SNAKE_CASE :Any = ClapConfig()
__SCREAMING_SNAKE_CASE :Tuple = enable_fusion
__SCREAMING_SNAKE_CASE :Dict = ClapModel(a_ )
# ignore the spectrogram embedding layer
model.load_state_dict(a_ , strict=a_ )
model.save_pretrained(a_ )
transformers_config.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
lowerCamelCase_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 498
| 1
|
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a ( __UpperCAmelCase : Tuple ) -> Dict:
__magic_name__: List[str] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def a ( __UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
__magic_name__: str = emb.weight.shape
__magic_name__: str = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase )
__magic_name__: int = emb.weight.data
return lin_layer
def a ( __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int]=None ) -> Optional[Any]:
__magic_name__: List[Any] = {}
for old_key in state_dict.keys():
__magic_name__: List[str] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
__magic_name__: Tuple = key.replace("""moe_layer.experts.0""" , f'ffn.experts.expert_{expert_idx}' )
else:
__magic_name__: Union[str, Any] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
__magic_name__: List[str] = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
__magic_name__: str = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
__magic_name__: Tuple = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
__magic_name__: str = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
__magic_name__: Dict = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
__magic_name__: Optional[int] = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
__magic_name__: Optional[Any] = state_dict[old_key]
return new_dict
def a ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str = WEIGHTS_NAME ) -> Optional[Any]:
__magic_name__: str = []
__magic_name__: Any = 0
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
for expert in range(__UpperCAmelCase ):
__magic_name__: List[Any] = switch_checkpoint_path + f'-rank-{expert}.pt'
if os.path.isfile(__UpperCAmelCase ):
__magic_name__: Any = torch.load(__UpperCAmelCase )["""model"""]
remove_ignore_keys_(__UpperCAmelCase )
__magic_name__: List[str] = rename_fairseq_keys(__UpperCAmelCase , __UpperCAmelCase )
__magic_name__: Union[str, Any] = os.path.join(
__UpperCAmelCase , weights_name.replace(""".bin""" , f'-{len(__UpperCAmelCase )+1:05d}-of-???.bin' ) )
torch.save(__UpperCAmelCase , __UpperCAmelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__UpperCAmelCase )[0]].dtype )
# Add the last block
__magic_name__: Union[str, Any] = os.path.join(__UpperCAmelCase , weights_name.replace(""".bin""" , f'-{len(__UpperCAmelCase )+1:05d}-of-???.bin' ) )
__magic_name__: Union[str, Any] = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(__UpperCAmelCase )
__magic_name__: Union[str, Any] = rename_fairseq_keys(__UpperCAmelCase , __UpperCAmelCase )
__magic_name__: Union[str, Any] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__UpperCAmelCase ) == 1:
__magic_name__: Any = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
torch.save(__UpperCAmelCase , __UpperCAmelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__UpperCAmelCase , __UpperCAmelCase )
# Otherwise, let's build the index
__magic_name__: List[str] = {}
for idx, shard in enumerate(__UpperCAmelCase ):
__magic_name__: Any = weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-{len(__UpperCAmelCase ):05d}.bin' )
__magic_name__: Dict = os.path.join(__UpperCAmelCase , weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-???.bin' ) )
os.rename(__UpperCAmelCase , os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
for key in shard:
__magic_name__: Dict = shard_file
# Add the metadata
__magic_name__: Any = {"""total_size""": total_size}
__magic_name__: int = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , """w""" , encoding="""utf-8""" ) as f:
__magic_name__: List[Any] = json.dumps(__UpperCAmelCase , indent=2 , sort_keys=__UpperCAmelCase ) + """\n"""
f.write(__UpperCAmelCase )
return metadata, index
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__lowerCamelCase = parser.parse_args()
__lowerCamelCase , __lowerCamelCase = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
__lowerCamelCase = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
__lowerCamelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
| 713
|
"""simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
UpperCAmelCase__ = GPTSwaTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = False
def lowerCamelCase__ ( self : Dict ) -> List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
__magic_name__: Optional[Any] = GPTSwaTokenizer(__snake_case , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : Optional[int] , __snake_case : int ) -> Tuple:
__magic_name__: str = """This is a test"""
__magic_name__: Dict = """This is a test"""
return input_text, output_text
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
__magic_name__: Any = """<s>"""
__magic_name__: Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
__magic_name__: List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__snake_case ) , 2_0_0_0 )
def lowerCamelCase__ ( self : Optional[Any] ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 )
def lowerCamelCase__ ( self : Any ) -> List[str]:
__magic_name__: int = GPTSwaTokenizer(__snake_case )
__magic_name__: Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__snake_case , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
__magic_name__: int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
__snake_case , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
# fmt: on
__magic_name__: List[Any] = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , )
__magic_name__: Dict = tokenizer.convert_ids_to_tokens(__snake_case )
# fmt: off
self.assertListEqual(
__snake_case , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def lowerCamelCase__ ( self : str ) -> Optional[int]:
__magic_name__: int = GPTSwaTokenizer(__snake_case )
__magic_name__: Optional[Any] = ["""This is a test""", """I was born in 92000, and this is falsé."""]
__magic_name__: Optional[int] = [
[4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
[2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__snake_case , __snake_case ):
self.assertListEqual(tokenizer.encode_fast(__snake_case ) , __snake_case )
# Test that decode_fast returns the input text
for text, token_ids in zip(__snake_case , __snake_case ):
self.assertEqual(tokenizer.decode_fast(__snake_case ) , __snake_case )
@slow
def lowerCamelCase__ ( self : Optional[int] ) -> int:
__magic_name__: Tuple = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
__magic_name__: str = {"""input_ids""": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=__snake_case , )
| 213
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
__lowercase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase : Tuple = "\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n >>> pipe.to(\"cuda\")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save(\"cat.png\")\n ```\n"
def get_new_h_w(h, w, scale_factor=8):
    # Map an image height/width to the corresponding latent size,
    # rounding up so the latent grid always covers the full image.
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
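# Quick sanity check for the helper above (values worked out by hand, not taken
# from the original file): with the default scale_factor of 8 the size is divided
# by 64, rounded up, then multiplied back by 8, e.g.
#   get_new_h_w(768, 768) -> (96, 96)
#   get_new_h_w(700, 500) -> (88, 64)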
class _A ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self : int , A_ : MultilingualCLIP , A_ : XLMRobertaTokenizer , A_ : UNetaDConditionModel , A_ : Union[DDIMScheduler, DDPMScheduler] , A_ : VQModel , ) -> Dict:
super().__init__()
self.register_modules(
text_encoder=A_ , tokenizer=A_ , unet=A_ , scheduler=A_ , movq=A_ , )
__snake_case = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowercase ( self : Any , A_ : List[str] , A_ : str , A_ : str , A_ : Union[str, Any] , A_ : Optional[Any] , A_ : Optional[Any] ) -> Dict:
if latents is None:
__snake_case = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
__snake_case = latents.to(A_ )
__snake_case = latents * scheduler.init_noise_sigma
return latents
def lowercase ( self : List[str] , A_ : Optional[int] , A_ : Any , A_ : Optional[Any] , A_ : Any , A_ : List[str]=None , ) -> str:
__snake_case = len(A_ ) if isinstance(A_ , A_ ) else 1
# get prompt text embeddings
__snake_case = self.tokenizer(
A_ , padding='''max_length''' , truncation=A_ , max_length=77 , return_attention_mask=A_ , add_special_tokens=A_ , return_tensors='''pt''' , )
__snake_case = text_inputs.input_ids
__snake_case = self.tokenizer(A_ , padding='''longest''' , return_tensors='''pt''' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(A_ , A_ ):
__snake_case = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__snake_case = text_input_ids.to(A_ )
__snake_case = text_inputs.attention_mask.to(A_ )
__snake_case , __snake_case = self.text_encoder(
input_ids=A_ , attention_mask=A_ )
__snake_case = prompt_embeds.repeat_interleave(A_ , dim=0 )
__snake_case = text_encoder_hidden_states.repeat_interleave(A_ , dim=0 )
__snake_case = text_mask.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''''''] * batch_size
elif type(A_ ) is not type(A_ ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(A_ )} !="
f" {type(A_ )}." )
elif isinstance(A_ , A_ ):
__snake_case = [negative_prompt]
elif batch_size != len(A_ ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(A_ )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
''' the batch size of `prompt`.''' )
else:
__snake_case = negative_prompt
__snake_case = self.tokenizer(
A_ , padding='''max_length''' , max_length=77 , truncation=A_ , return_attention_mask=A_ , add_special_tokens=A_ , return_tensors='''pt''' , )
__snake_case = uncond_input.input_ids.to(A_ )
__snake_case = uncond_input.attention_mask.to(A_ )
__snake_case , __snake_case = self.text_encoder(
input_ids=A_ , attention_mask=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = negative_prompt_embeds.shape[1]
__snake_case = negative_prompt_embeds.repeat(1 , A_ )
__snake_case = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ )
__snake_case = uncond_text_encoder_hidden_states.shape[1]
__snake_case = uncond_text_encoder_hidden_states.repeat(1 , A_ , 1 )
__snake_case = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , A_ , -1 )
__snake_case = uncond_text_mask.repeat_interleave(A_ , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([negative_prompt_embeds, prompt_embeds] )
__snake_case = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__snake_case = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowercase ( self : str , A_ : Dict=0 ) -> List[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__snake_case = torch.device(f"cuda:{gpu_id}" )
__snake_case = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A_ , A_ )
def lowercase ( self : Any , A_ : Dict=0 ) -> int:
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
__snake_case = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=A_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__snake_case = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__snake_case , __snake_case = cpu_offload_with_hook(A_ , A_ , prev_module_hook=A_ )
if self.safety_checker is not None:
__snake_case , __snake_case = cpu_offload_with_hook(self.safety_checker , A_ , prev_module_hook=A_ )
# We'll offload the last model manually.
__snake_case = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowercase ( self : Tuple ) -> List[Any]:
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(A_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A_ )
def __call__( self : str , A_ : Union[str, List[str]] , A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A_ : Optional[Union[str, List[str]]] = None , A_ : int = 512 , A_ : int = 512 , A_ : int = 100 , A_ : float = 4.0 , A_ : int = 1 , A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[str] = "pil" , A_ : bool = True , ) -> Optional[Any]:
if isinstance(A_ , A_ ):
__snake_case = 1
elif isinstance(A_ , A_ ):
__snake_case = len(A_ )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(A_ )}" )
__snake_case = self._execution_device
__snake_case = batch_size * num_images_per_prompt
__snake_case = guidance_scale > 1.0
__snake_case , __snake_case , __snake_case = self._encode_prompt(
A_ , A_ , A_ , A_ , A_ )
if isinstance(A_ , A_ ):
__snake_case = torch.cat(A_ , dim=0 )
if isinstance(A_ , A_ ):
__snake_case = torch.cat(A_ , dim=0 )
if do_classifier_free_guidance:
__snake_case = image_embeds.repeat_interleave(A_ , dim=0 )
__snake_case = negative_image_embeds.repeat_interleave(A_ , dim=0 )
__snake_case = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=A_ )
self.scheduler.set_timesteps(A_ , device=A_ )
__snake_case = self.scheduler.timesteps
__snake_case = self.unet.config.in_channels
__snake_case , __snake_case = get_new_h_w(A_ , A_ , self.movq_scale_factor )
# create initial latent
__snake_case = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , A_ , A_ , A_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case = {'''text_embeds''': prompt_embeds, '''image_embeds''': image_embeds}
__snake_case = self.unet(
sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0]
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.split(latents.shape[1] , dim=1 )
__snake_case , __snake_case = noise_pred.chunk(2 )
__snake_case , __snake_case = variance_pred.chunk(2 )
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__snake_case = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__snake_case , __snake_case = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(
A_ , A_ , A_ , generator=A_ , ).prev_sample
# post-processing
__snake_case = self.movq.decode(A_ , force_not_quantize=A_ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
__snake_case = image * 0.5 + 0.5
__snake_case = image.clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
| 564
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowercase : Union[str, Any] = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Tuple = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Union[str, Any] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 564
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
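# Note on the pattern above (explanatory comment, not from the original file):
# installing a _LazyModule into sys.modules means symbols such as ReformerModel
# are only imported on first attribute access, so importing the package stays
# cheap and optional backends (sentencepiece, tokenizers, torch) are only pulled
# in when the corresponding class is actually used.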
| 710
|
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
return EnvironmentCommand()
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
return EnvironmentCommand(args.accelerate_config_file )
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
@staticmethod
def A_ ( lowerCamelCase ):
snake_case__ = parser.add_parser("env" )
download_parser.set_defaults(func=lowerCamelCase )
download_parser.add_argument(
"--accelerate-config_file" , default=lowerCamelCase , help="The accelerate config file to use for the default values in the launching script." , )
download_parser.set_defaults(func=lowerCamelCase )
def __init__( self , lowerCamelCase , *lowerCamelCase ):
snake_case__ = accelerate_config_file
def A_ ( self ):
snake_case__ = "not installed"
if is_safetensors_available():
import safetensors
snake_case__ = safetensors.__version__
elif importlib.util.find_spec("safetensors" ) is not None:
import safetensors
snake_case__ = F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
snake_case__ = "not installed"
snake_case__ = snake_case__ = "not found"
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
snake_case__ = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(lowerCamelCase ):
snake_case__ = load_config_from_file(self._accelerate_config_file ).to_dict()
snake_case__ = (
"\n".join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(lowerCamelCase , lowerCamelCase )
else F"""\t{accelerate_config}"""
)
snake_case__ = "not installed"
snake_case__ = "NA"
if is_torch_available():
import torch
snake_case__ = torch.__version__
snake_case__ = torch.cuda.is_available()
snake_case__ = "not installed"
snake_case__ = "NA"
if is_tf_available():
import tensorflow as tf
snake_case__ = tf.__version__
try:
# deprecated in v2.1
snake_case__ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
snake_case__ = bool(tf.config.list_physical_devices("GPU" ) )
snake_case__ = "not installed"
snake_case__ = "not installed"
snake_case__ = "not installed"
snake_case__ = "NA"
if is_flax_available():
import flax
import jax
import jaxlib
snake_case__ = flax.__version__
snake_case__ = jax.__version__
snake_case__ = jaxlib.__version__
snake_case__ = jax.lib.xla_bridge.get_backend().platform
snake_case__ = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"Safetensors version": F"""{safetensors_version}""",
"Accelerate version": F"""{accelerate_version}""",
"Accelerate config": F"""{accelerate_config_str}""",
"PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""",
"Tensorflow version (GPU?)": F"""{tf_version} ({tf_cuda_available})""",
"Flax version (CPU?/GPU?/TPU?)": F"""{flax_version} ({jax_backend})""",
"Jax version": F"""{jax_version}""",
"JaxLib version": F"""{jaxlib_version}""",
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(lowerCamelCase ) )
return info
@staticmethod
def A_ ( lowerCamelCase ):
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
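# For illustration (not part of the original file): format_dict({"a": 1, "b": 2})
# returns "- a: 1\n- b: 2\n", i.e. the bullet list printed above for pasting
# into a GitHub issue.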
| 530
| 0
|
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# check whether the graph has an Euler path or an Euler circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print('''graph is not Eulerian''')
        print('''no path''')
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('''graph has a Euler path''')
    if check == 1:
        print('''graph has a Euler cycle''')
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
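# For reference (derived by hand from the vertex degrees above, not part of the
# original file): G1 has two odd-degree vertices (1 and 5) so it has an Euler
# path; G2 and G4 have only even degrees so they have an Euler cycle; G3 has
# four odd-degree vertices so it is not Eulerian; G5 has no edges at all, which
# this check also reports as a cycle.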
| 677
|
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
UpperCAmelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def lowerCamelCase (a_ :Optional[int] , a_ :tuple , a_ :Path , a_ :str , a_ :int , a_ :List[Any] , a_ :Any , a_ :Union[str, Any]=False , ) -> Dict:
output_path.parent.mkdir(parents=a_ , exist_ok=a_)
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
a_ , a_ , f=output_path.as_posix() , input_names=a_ , output_names=a_ , dynamic_axes=a_ , do_constant_folding=a_ , use_external_data_format=a_ , enable_onnx_checker=a_ , opset_version=a_ , )
else:
export(
a_ , a_ , f=output_path.as_posix() , input_names=a_ , output_names=a_ , dynamic_axes=a_ , do_constant_folding=a_ , opset_version=a_ , )
@torch.no_grad()
def lowerCamelCase (a_ :str , a_ :str , a_ :int , a_ :bool = False) -> Union[str, Any]:
lowercase :Any = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowercase :Union[str, Any] = '''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''')
else:
lowercase :List[str] = '''cpu'''
lowercase :List[str] = StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=a_).to(a_)
lowercase :List[Any] = Path(a_)
# TEXT ENCODER
lowercase :List[Any] = pipeline.text_encoder.config.max_position_embeddings
lowercase :Dict = pipeline.text_encoder.config.hidden_size
lowercase :Union[str, Any] = pipeline.tokenizer(
'''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=a_ , return_tensors='''pt''' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=a_ , dtype=torch.intaa)) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''sequence'''},
} , opset=a_ , )
del pipeline.text_encoder
# UNET
lowercase :Any = pipeline.unet.config.in_channels
lowercase :List[Any] = pipeline.unet.config.sample_size
lowercase :Optional[int] = output_path / '''unet''' / '''model.onnx'''
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , a_ , a_ , a_).to(device=a_ , dtype=a_),
torch.randn(2).to(device=a_ , dtype=a_),
torch.randn(2 , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=a_ , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''timestep''': {0: '''batch'''},
'''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
} , opset=a_ , use_external_data_format=a_ , )
lowercase :List[Any] = str(unet_path.absolute().as_posix())
lowercase :str = os.path.dirname(a_)
lowercase :Optional[Any] = onnx.load(a_)
# clean up existing tensor files
shutil.rmtree(a_)
os.mkdir(a_)
# collate external tensor files into one
onnx.save_model(
a_ , a_ , save_as_external_data=a_ , all_tensors_to_one_file=a_ , location='''weights.pb''' , convert_attribute=a_ , )
del pipeline.unet
# VAE ENCODER
lowercase :Tuple = pipeline.vae
lowercase :Optional[Any] = vae_encoder.config.in_channels
lowercase :Any = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
lowercase :Any = lambda a_ , a_: vae_encoder.encode(a_ , a_)[0].sample()
onnx_export(
a_ , model_args=(
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=a_ , )
# VAE DECODER
lowercase :Any = pipeline.vae
lowercase :Dict = vae_decoder.config.latent_channels
lowercase :Union[str, Any] = vae_decoder.config.out_channels
# forward only through the decoder part
lowercase :List[Any] = vae_encoder.decode
onnx_export(
a_ , model_args=(
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=a_ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
lowercase :Dict = pipeline.safety_checker
lowercase :str = safety_checker.config.vision_config.num_channels
lowercase :str = safety_checker.config.vision_config.image_size
lowercase :List[str] = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , a_ , a_ , a_ , ).to(device=a_ , dtype=a_),
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
'''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
} , opset=a_ , )
del pipeline.safety_checker
lowercase :Tuple = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''')
lowercase :Optional[Any] = pipeline.feature_extractor
else:
lowercase :int = None
lowercase :Union[str, Any] = None
lowercase :Optional[int] = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''') , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''') , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''') , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''') , scheduler=pipeline.scheduler , safety_checker=a_ , feature_extractor=a_ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(a_)
print('''ONNX pipeline saved to''' , a_)
del pipeline
del onnx_pipeline
lowercase :Tuple = OnnxStableDiffusionPipeline.from_pretrained(a_ , provider='''CPUExecutionProvider''')
print('''ONNX pipeline is loadable''')
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
UpperCAmelCase = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
| 677
| 1
|
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by squared Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Update the winning vector."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"""Clusters that the test sample belongs to : {winner}""")
    print(f"""Weights that have been trained : {weights}""")
# running the main() function
if __name__ == "__main__":
main()
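# Note (explanatory comment, not from the original file): get_winner returns 0
# or 1, the index of whichever of the two weight vectors is closer in squared
# Euclidean distance to the sample, so the printed "cluster" is simply that
# index after three epochs of training.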
| 721
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
lowercase_ = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
lowercase_ = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a batch of torch images (values in [-1, 1]) to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images (values in [0, 1]) to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
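# Minimal usage sketch (illustrative only; numpy is not imported in this file,
# so the import below is an assumption):
#
#   import numpy as np
#   batch = np.random.rand(2, 8, 8, 3)      # two 8x8 RGB images in [0, 1]
#   pils = numpy_to_pil(batch)
#   assert len(pils) == 2 and pils[0].size == (8, 8)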
| 456
| 0
|
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Probability of exactly `successes` successes in `trials` independent
    Bernoulli trials, each succeeding with probability `prob`."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('Probability of 2 successes out of 4 trials')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
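# Worked example for the call above (hand-calculated, not from the original file):
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375,
# so the script should print roughly 0.2109375.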
| 16
|
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the CNN/DailyMail summarization dataset: one story per file,
    with the reference summary given as sentences prefixed by `@highlight` lines."""

    def __init__(self, path="", prefix="train"):
        """List all documents to summarize; files are not read into memory up front
        because of the size of the dataset."""
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Return the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split('''/''')[-1]
        with open(document_path, encoding='''utf-8''') as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    """Split a raw story file into article lines and summary lines."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split('''\n''')]))
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith('''@highlight'''):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith('''@highlight'''), lines))
    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u2019''', ''')''']
    if line.startswith('''@highlight'''):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncate or pad a token sequence to exactly `block_size` tokens."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Return an attention mask with 0 at padding positions and 1 elsewhere."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Tokenize story and summary lines and flatten them into two id sequences."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternate 0/1 segment ids, flipping at every separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
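# Sketch of how the two padding helpers above compose (function names as defined
# above; the concrete numbers are illustrative, not from the original file):
#
#   ids = fit_to_block_size([5, 6, 7], block_size=5, pad_token_id=0)   # [5, 6, 7, 0, 0]
#   mask = build_mask(torch.tensor(ids), pad_token_id=0)               # tensor([1, 1, 1, 0, 0])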
| 255
| 0
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def snake_case__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> int:
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def snake_case__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=True ) -> Optional[int]:
"""simple docstring"""
model.train()
A__ : Union[str, Any] = model(lowerCAmelCase__ )
A__ : List[Any] = F.mse_loss(lowerCAmelCase__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCAmelCase__ )
def snake_case__ ( __lowercase , __lowercase=False ) -> Optional[int]:
"""simple docstring"""
set_seed(4_2 )
A__ : Union[str, Any] = RegressionModel()
A__ : Optional[Any] = deepcopy(lowerCAmelCase__ )
A__ : Tuple = RegressionDataset(length=8_0 )
A__ : str = DataLoader(lowerCAmelCase__ , batch_size=1_6 )
model.to(accelerator.device )
if sched:
A__ : Dict = AdamW(params=model.parameters() , lr=1E-3 )
A__ : Union[str, Any] = AdamW(params=ddp_model.parameters() , lr=1E-3 )
A__ : List[str] = LambdaLR(lowerCAmelCase__ , lr_lambda=lambda __lowercase : epoch**0.65 )
A__ : Tuple = LambdaLR(lowerCAmelCase__ , lr_lambda=lambda __lowercase : epoch**0.65 )
# Make a copy of `model`
if sched:
A__ : Optional[int] = accelerator.prepare(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
A__ : List[str] = accelerator.prepare(lowerCAmelCase__ , lowerCAmelCase__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def snake_case__ ( __lowercase ) -> List[str]:
"""simple docstring"""
A__ : Optional[Any] = get_training_setup(lowerCAmelCase__ )
# Use a single batch
A__ : Optional[Any] = next(iter(lowerCAmelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
A__ : int = accelerator.gather((ddp_input, ddp_target) )
A__ : Any = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCAmelCase__ ):
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
# Sync grads
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
A__ : Optional[Any] = ddp_input[torch.randperm(len(lowerCAmelCase__ ) )]
def snake_case__ ( __lowercase ) -> Dict:
"""simple docstring"""
A__ : str = get_training_setup(lowerCAmelCase__ )
# Use a single batch
A__ : Dict = next(iter(lowerCAmelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
A__ : Union[str, Any] = accelerator.gather((ddp_input, ddp_target) )
A__ : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCAmelCase__ ):
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
# Sync grads
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
A__ : Optional[Any] = ddp_input[torch.randperm(len(lowerCAmelCase__ ) )]
def snake_case__ ( __lowercase=False , __lowercase=False ) -> Dict:
"""simple docstring"""
A__ : Optional[int] = Accelerator(
split_batches=lowerCAmelCase__ , dispatch_batches=lowerCAmelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
A__ : Union[str, Any] = get_training_setup(lowerCAmelCase__ )
for iteration, batch in enumerate(lowerCAmelCase__ ):
A__ : Any = batch.values()
# Gather the distributed inputs and targs for the base model
A__ : int = accelerator.gather((ddp_input, ddp_target) )
A__ : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCAmelCase__ ):
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCAmelCase__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
A__ : str = ddp_input[torch.randperm(len(lowerCAmelCase__ ) )]
GradientState._reset_state()
def snake_case__ ( __lowercase=False , __lowercase=False ) -> Tuple:
"""simple docstring"""
A__ : str = Accelerator(
split_batches=lowerCAmelCase__ , dispatch_batches=lowerCAmelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
A__ : Optional[Any] = get_training_setup(lowerCAmelCase__ , lowerCAmelCase__ )
for iteration, batch in enumerate(lowerCAmelCase__ ):
A__ : List[Any] = batch.values()
# Gather the distributed inputs and targs for the base model
A__ : Union[str, Any] = accelerator.gather((ddp_input, ddp_target) )
A__ : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCAmelCase__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCAmelCase__ ):
step_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
A__ : Optional[int] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCAmelCase__ ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def snake_case__ ( ) -> str:
"""simple docstring"""
A__ : Optional[int] = Accelerator()
A__ : int = RegressionDataset(length=8_0 )
A__ : str = DataLoader(lowerCAmelCase__ , batch_size=1_6 )
A__ : Dict = RegressionDataset(length=9_6 )
A__ : Tuple = DataLoader(lowerCAmelCase__ , batch_size=1_6 )
A__ : Dict = accelerator.prepare(lowerCAmelCase__ , lowerCAmelCase__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCAmelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase__ )
if iteration < len(lowerCAmelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCAmelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase__ )
if batch_num < len(lowerCAmelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def snake_case__ ( ) -> str:
"""simple docstring"""
A__ : Union[str, Any] = Accelerator()
A__ : str = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(lowerCAmelCase__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(lowerCAmelCase__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(lowerCAmelCase__ , lowerCAmelCase__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( __lowercase ) -> Optional[int]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 709
|
import requests
APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Current weather for a named location."""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Weather forecast for a named location."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """One-call weather data for a latitude/longitude pair."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()
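# Note on the pattern above (explanatory comment, not from the original file):
# `params=locals()` sends the function's arguments as the query string, e.g.
# current_weather("Paris") issues GET .../weather?q=Paris&appid=<APPID>.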
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
| 182
| 0
|
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"""Successfully ran on {num_gpus} GPUs""")
if __name__ == "__main__":
main()
| 287
|
'''simple docstring'''
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the Trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if the exact word was previously inserted."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Delete a word from the Trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=' ')
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = 'banana bananas bandana band apple all beast'.split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find('banana')
    assert not root.find('bandanas')
    assert not root.find('apps')
    assert root.find('apple')
    assert root.find('all')
    root.delete('all')
    assert not root.find('all')
    root.delete('banana')
    assert not root.find('banana')
    assert root.find('bananas')
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), 'works!' if passes else 'doesn\'t work :(')


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results('Testing trie functionality', test_trie())


if __name__ == "__main__":
    main()
| 394
| 0
|
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__snake_case : List[str] = logging.getLogger()
__snake_case : Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Any ) -> int:
'''simple docstring'''
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
A__ : List[str] ={"""source""": """What is love ?""", """target""": """life"""}
A__ : List[Any] ={"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
A__ : Optional[int] ="""\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowerCAmelCase_ , f"{split}.{field}" ) , """w""" ) as f:
f.write(lowerCAmelCase_ )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : str = "pytorch" ) -> Tuple:
'''simple docstring'''
A__ : int =self.get_auto_remove_tmp_dir()
A__ : Union[str, Any] =os.path.join(lowerCAmelCase_ , """output""" )
A__ : Optional[Any] =os.path.join(lowerCAmelCase_ , """data""" )
self._create_dummy_data(data_dir=lowerCAmelCase_ )
A__ : List[str] =f"\n --data_dir {data_dir} \\n --output_dir {output_dir} \\n --model_name_or_path facebook/rag-sequence-base \\n --model_type rag_sequence \\n --do_train \\n --do_predict \\n --n_val -1 \\n --val_check_interval 1.0 \\n --train_batch_size 2 \\n --eval_batch_size 1 \\n --max_source_length 25 \\n --max_target_length 25 \\n --val_max_target_length 25 \\n --test_max_target_length 25 \\n --label_smoothing 0.1 \\n --dropout 0.1 \\n --attention_dropout 0.1 \\n --weight_decay 0.001 \\n --adam_epsilon 1e-08 \\n --max_grad_norm 0.1 \\n --lr_scheduler polynomial \\n --learning_rate 3e-04 \\n --num_train_epochs 1 \\n --warmup_steps 4 \\n --gradient_accumulation_steps 1 \\n --distributed-port 8787 \\n --use_dummy_dataset 1 \\n --distributed_retriever {distributed_retriever} \\n ".split()
if gpus > 0:
testargs.append(f"--gpus={gpus}" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
A__ : int =[sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowerCAmelCase_ , env=self.get_env() )
A__ : Dict =os.path.join(lowerCAmelCase_ , """metrics.json""" )
with open(lowerCAmelCase_ ) as f:
A__ : Optional[int] =json.load(lowerCAmelCase_ )
return result
@require_torch_gpu
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
A__ : Tuple =self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
A__ : Dict =self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
A__ : List[str] =self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
A__ : str =self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 687
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[Any] = logging.get_logger(__name__)
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[str]=False ) -> str:
"""simple docstring"""
A__ : int =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A__ : int =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[Any], __snake_case : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A__ : Any =""""""
else:
A__ : Optional[int] ="""vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : str =state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
A__ : Optional[Any] =state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ : Optional[int] =in_proj_weight[
: config.hidden_size, :
]
A__ : str =in_proj_bias[: config.hidden_size]
A__ : Optional[Any] =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Dict =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : List[Any] =in_proj_weight[
-config.hidden_size :, :
]
A__ : Optional[Any] =in_proj_bias[-config.hidden_size :]
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ : List[Any] =["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : List[Any], __snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A__ : Dict =dct.pop(__snake_case )
A__ : Tuple =val
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
A__ : Tuple ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : Tuple =Image.open(requests.get(__snake_case, stream=__snake_case ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Tuple, __snake_case : List[str]=True ) -> str:
"""simple docstring"""
A__ : Tuple =ViTConfig()
# patch_size
if model_name[-1] == "8":
A__ : Optional[Any] =8
# set labels if required
if not base_model:
A__ : Optional[Any] =1_000
A__ : str ="""huggingface/label-files"""
A__ : Any ="""imagenet-1k-id2label.json"""
A__ : Tuple =json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type="""dataset""" ), """r""" ) )
A__ : List[str] ={int(__snake_case ): v for k, v in idalabel.items()}
A__ : List[Any] =idalabel
A__ : List[Any] ={v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
A__ : str =384
A__ : Optional[Any] =1_536
A__ : Optional[Any] =12
A__ : Union[str, Any] =6
# load original model from torch hub
A__ : List[Any] =torch.hub.load("""facebookresearch/dino:main""", __snake_case )
original_model.eval()
# load state_dict of original model, remove and rename some keys
A__ : List[str] =original_model.state_dict()
if base_model:
remove_classification_head_(__snake_case )
A__ : Union[str, Any] =create_rename_keys(__snake_case, base_model=__snake_case )
for src, dest in rename_keys:
rename_key(__snake_case, __snake_case, __snake_case )
read_in_q_k_v(__snake_case, __snake_case, __snake_case )
# load HuggingFace model
if base_model:
A__ : List[str] =ViTModel(__snake_case, add_pooling_layer=__snake_case ).eval()
else:
A__ : List[str] =ViTForImageClassification(__snake_case ).eval()
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by ViTImageProcessor
A__ : Union[str, Any] =ViTImageProcessor()
A__ : Optional[int] =image_processor(images=prepare_img(), return_tensors="""pt""" )
A__ : Union[str, Any] =encoding["""pixel_values"""]
A__ : Union[str, Any] =model(__snake_case )
if base_model:
A__ : List[str] =original_model(__snake_case )
assert torch.allclose(__snake_case, outputs.last_hidden_state[:, 0, :], atol=1E-1 )
else:
A__ : Optional[int] =original_model(__snake_case )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case, outputs.logits, atol=1E-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__snake_case )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
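# Hedged usage sketch (the script filename and output path below are illustrative, not from the original file):
#   python convert_vit_dino_to_pytorch.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16
# Note that parser.set_defaults(base_model=True) means the script converts the backbone-only
# ViTModel (no classification head) by default.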
| 687
| 1
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = "Speech2TextFeatureExtractor"
__lowerCamelCase : Optional[int] = "Speech2TextTokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def __call__( self, *lowerCamelCase__, **lowerCamelCase__ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase__, **lowerCamelCase__ )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
A : Optional[Any] = kwargs.pop("""raw_speech""" )
else:
A : Optional[Any] = kwargs.pop("""audio""", lowerCamelCase__ )
A : Any = kwargs.pop("""sampling_rate""", lowerCamelCase__ )
A : Tuple = kwargs.pop("""text""", lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
A : List[str] = args[0]
A : Any = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
A : List[str] = self.feature_extractor(lowerCamelCase__, *lowerCamelCase__, sampling_rate=lowerCamelCase__, **lowerCamelCase__ )
if text is not None:
A : List[str] = self.tokenizer(lowerCamelCase__, **lowerCamelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A : int = encodings["""input_ids"""]
return inputs
def _lowerCAmelCase ( self, *lowerCamelCase__, **lowerCamelCase__ ):
return self.tokenizer.batch_decode(*lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, *lowerCamelCase__, **lowerCamelCase__ ):
return self.tokenizer.decode(*lowerCamelCase__, **lowerCamelCase__ )
@contextmanager
def _lowerCAmelCase ( self ):
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
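# Hedged usage sketch (model id and arrays are illustrative; the class above corresponds to the
# Speech2Text processor, which routes `audio` to the feature extractor and `text` to the tokenizer):
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="a transcription", return_tensors="pt")["input_ids"]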
| 662
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
A : Dict = """backbone.""" if is_semantic else """"""
A : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
A : Dict = """backbone.""" if is_semantic else """"""
# queries, keys and values
A : Union[str, Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
A : Tuple = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
A : Optional[int] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
A : int = in_proj_weight[
: config.hidden_size, :
]
A : Any = q_bias
A : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A : Tuple = in_proj_weight[
-config.hidden_size :, :
]
A : Union[str, Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A : str = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
A : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
A : Dict = gamma_a
A : Dict = gamma_a
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
"""simple docstring"""
A : List[str] = dct.pop(_lowerCAmelCase )
A : Optional[Any] = val
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
A : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> str:
"""simple docstring"""
A : Dict = False if """rvlcdip""" in checkpoint_url else True
A : Union[str, Any] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A : Dict = 1024
A : List[Any] = 4096
A : int = 24
A : int = 16
# labels
if "rvlcdip" in checkpoint_url:
A : List[Any] = 16
A : List[Any] = """huggingface/label-files"""
A : int = """rvlcdip-id2label.json"""
A : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
A : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
A : int = idalabel
A : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A : List[str] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""]
A : str = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase )
# load HuggingFace model
A : Any = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase )
model.eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
A : Any = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase )
A : int = prepare_img()
A : Tuple = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" )
A : str = encoding["""pixel_values"""]
A : Tuple = model(_lowerCAmelCase )
A : Optional[int] = outputs.logits
# verify logits
A : Tuple = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
if has_lm_head:
A : Any = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
A : List[Any] = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 662
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
lowercase : str = logging.get_logger(__name__)
lowercase : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowercase : Dict = {
"""vocab_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
lowercase : Any = {
"""yjernite/retribert-base-uncased""": 512,
}
lowercase : Optional[int] = {
"""yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class __A( __UpperCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = PRETRAINED_INIT_CONFIGURATION
__A = RetriBertTokenizer
__A = ["input_ids", "attention_mask"]
def __init__( self, A=None, A=None, A=True, A="[UNK]", A="[SEP]", A="[PAD]", A="[CLS]", A="[MASK]", A=True, A=None, **A, ):
"""simple docstring"""
super().__init__(
A, tokenizer_file=A, do_lower_case=A, unk_token=A, sep_token=A, pad_token=A, cls_token=A, mask_token=A, tokenize_chinese_chars=A, strip_accents=A, **A, )
_UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''', A ) != do_lower_case
or normalizer_state.get('''strip_accents''', A ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''', A ) != tokenize_chinese_chars
):
_UpperCamelCase = getattr(A, normalizer_state.pop('''type''' ) )
_UpperCamelCase = do_lower_case
_UpperCamelCase = strip_accents
_UpperCamelCase = tokenize_chinese_chars
_UpperCamelCase = normalizer_class(**A )
_UpperCamelCase = do_lower_case
def _UpperCamelCase ( self, A, A=None ):
"""simple docstring"""
_UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self, A, A = None ):
"""simple docstring"""
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self, A, A = None ):
"""simple docstring"""
_UpperCamelCase = self._tokenizer.model.save(A, name=A )
return tuple(A )
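# Descriptive note on the two helper methods above: a single sequence is encoded as
# [CLS] A [SEP] with all-zero token type ids, while a pair is encoded as
# [CLS] A [SEP] B [SEP], where the tokens of the second segment (B and its [SEP]) get type id 1.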
| 105
|
import datasets
from .evaluate import evaluate
_CITATION = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A( datasets.Metric ):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ), codebase_urls=['''https://www.atticusprojectai.org/cuad'''], reference_urls=['''https://www.atticusprojectai.org/cuad'''], )
    def _compute(self, predictions, references):
        """simple docstring"""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 105
| 1
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    '''simple docstring'''
    # different_colour_ways_number[row_length][tile_length - 2] counts the tilings of a row
    # of that length that use at least one tile of size tile_length (2, 3 or 4); each tile
    # size is counted separately and the three counts are summed at the end.
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
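# Sanity check (Project-Euler-116-style counting: red tiles of length 2, green of length 3,
# blue of length 4, each colour counted separately, at least one tile per row):
# solution(5) should return 12.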
if __name__ == "__main__":
print(F"{solution() = }")
| 65
|
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowerCAmelCase :
def _A ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[str] = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowerCAmelCase__ : Any = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=a__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowerCAmelCase__ : Tuple = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _A ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Any = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowerCAmelCase__ : int = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCAmelCase__ : Tuple = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=a__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowerCAmelCase__ : Tuple = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
lowerCAmelCase__ : Dict = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _A ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a__ )
lowerCAmelCase__ : str = inputs["prompt"]
lowerCAmelCase__ : Optional[Any] = inputs["generator"]
lowerCAmelCase__ : str = inputs["num_inference_steps"]
lowerCAmelCase__ : int = inputs["output_type"]
if "image" in inputs:
lowerCAmelCase__ : Optional[int] = inputs["image"]
else:
lowerCAmelCase__ : str = None
if "mask_image" in inputs:
lowerCAmelCase__ : List[str] = inputs["mask_image"]
else:
lowerCAmelCase__ : str = None
if "original_image" in inputs:
lowerCAmelCase__ : Optional[int] = inputs["original_image"]
else:
lowerCAmelCase__ : Union[str, Any] = None
lowerCAmelCase__ , lowerCAmelCase__ : Dict = pipe.encode_prompt(a__ )
# inputs with prompt converted to embeddings
lowerCAmelCase__ : Dict = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowerCAmelCase__ : Optional[Any] = image
if mask_image is not None:
lowerCAmelCase__ : Optional[int] = mask_image
if original_image is not None:
lowerCAmelCase__ : Optional[Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(a__ , a__ , a__ )
lowerCAmelCase__ : Optional[int] = pipe(**a__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a__ )
lowerCAmelCase__ : Tuple = self.pipeline_class.from_pretrained(a__ )
pipe_loaded.to(a__ )
pipe_loaded.set_progress_bar_config(disable=a__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a__ , a__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
lowerCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(a__ )
lowerCAmelCase__ : Any = inputs["generator"]
lowerCAmelCase__ : Union[str, Any] = inputs["num_inference_steps"]
lowerCAmelCase__ : str = inputs["output_type"]
# inputs with prompt converted to embeddings
lowerCAmelCase__ : int = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowerCAmelCase__ : Tuple = image
if mask_image is not None:
lowerCAmelCase__ : int = mask_image
if original_image is not None:
lowerCAmelCase__ : List[Any] = original_image
lowerCAmelCase__ : Dict = pipe_loaded(**a__ )[0]
lowerCAmelCase__ : int = np.abs(to_np(a__ ) - to_np(a__ ) ).max()
self.assertLess(a__ , 1e-4 )
def _A ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : str = self.get_dummy_components()
lowerCAmelCase__ : int = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
lowerCAmelCase__ : Any = self.get_dummy_inputs(a__ )
lowerCAmelCase__ : Union[str, Any] = pipe(**a__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a__ )
lowerCAmelCase__ : str = self.pipeline_class.from_pretrained(a__ )
pipe_loaded.to(a__ )
pipe_loaded.set_progress_bar_config(disable=a__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowerCAmelCase__ : str = self.get_dummy_inputs(a__ )
lowerCAmelCase__ : Union[str, Any] = pipe_loaded(**a__ )[0]
lowerCAmelCase__ : Union[str, Any] = np.abs(to_np(a__ ) - to_np(a__ ) ).max()
self.assertLess(a__ , 1e-4 )
| 378
| 0
|
'''simple docstring'''
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif stress < 0:
raise ValueError("""Stress cannot be negative""" )
elif tangential_force < 0:
raise ValueError("""Tangential Force cannot be negative""" )
elif area < 0:
raise ValueError("""Area cannot be negative""" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
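# Hedged usage sketch: with exactly one of the three quantities passed as 0, the function
# solves for it from the other two, e.g.
#   shear_stress(stress=25, tangential_force=100, area=0)    -> ("area", 4.0)
#   shear_stress(stress=0, tangential_force=1600, area=200)  -> ("stress", 8.0)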
| 709
|
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = 'scheduler_config.json'
class _snake_case ( a__ ):
lowerCAmelCase :Union[str, Any] = 1
lowerCAmelCase :Union[str, Any] = 2
lowerCAmelCase :Optional[int] = 3
lowerCAmelCase :Optional[int] = 4
lowerCAmelCase :Any = 5
lowerCAmelCase :Tuple = 6
lowerCAmelCase :List[Any] = 7
lowerCAmelCase :str = 8
lowerCAmelCase :List[Any] = 9
lowerCAmelCase :List[str] = 10
lowerCAmelCase :Union[str, Any] = 11
lowerCAmelCase :Optional[int] = 12
lowerCAmelCase :str = 13
lowerCAmelCase :Dict = 14
@dataclass
class _snake_case ( a__ ):
lowerCAmelCase :torch.FloatTensor
class _snake_case :
lowerCAmelCase :str = SCHEDULER_CONFIG_NAME
lowerCAmelCase :Union[str, Any] = []
lowerCAmelCase :List[str] = True
@classmethod
def snake_case__ ( cls , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase=False , **_lowerCamelCase , ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = cls.load_config(
pretrained_model_name_or_path=_lowerCamelCase , subfolder=_lowerCamelCase , return_unused_kwargs=_lowerCamelCase , return_commit_hash=_lowerCamelCase , **_lowerCamelCase , )
return cls.from_config(_lowerCamelCase , return_unused_kwargs=_lowerCamelCase , **_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = False , **_lowerCamelCase):
self.save_config(save_directory=_lowerCamelCase , push_to_hub=_lowerCamelCase , **_lowerCamelCase)
@property
def snake_case__ ( self):
return self._get_compatibles()
@classmethod
def snake_case__ ( cls):
UpperCAmelCase__ : List[Any] = list(set([cls.__name__] + cls._compatibles))
UpperCAmelCase__ : int = importlib.import_module(__name__.split(""".""")[0])
UpperCAmelCase__ : Dict = [
getattr(_lowerCamelCase , _lowerCamelCase) for c in compatible_classes_str if hasattr(_lowerCamelCase , _lowerCamelCase)
]
return compatible_classes
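# Hedged usage sketch (diffusers-style API; the names below assume this mixin backs the
# concrete schedulers shipped with the library):
#   scheduler = DDPMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
#   scheduler.save_pretrained("./my_scheduler")  # writes scheduler_config.json
# The compatibles mechanism above lists the other scheduler classes that can be swapped in
# for the same pipeline because they share a configuration format.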
| 113
| 0
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def SCREAMING_SNAKE_CASE__ ( ):
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
snake_case__ = [1, 2, 3]
with pytest.raises(__lowerCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=2 )
with pytest.raises(__lowerCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
snake_case__ = [1, 2]
snake_case__ = {"a": 1, "b": 2}
snake_case__ = {"a": [1, 2], "b": [3, 4]}
snake_case__ = {"a": {"1": 1}, "b": 2}
snake_case__ = {"a": 1, "b": 2, "c": 3, "d": 4}
snake_case__ = [2, 3]
snake_case__ = {"a": 2, "b": 3}
snake_case__ = {"a": [2, 3], "b": [4, 5]}
snake_case__ = {"a": {"1": 2}, "b": 3}
snake_case__ = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) == expected_map_nested_sa
assert map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) == expected_map_nested_sa
assert map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) == expected_map_nested_sa
assert map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) == expected_map_nested_sa
assert map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) == expected_map_nested_sa
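# Hedged usage sketch: outside of tests, the same context manager lets `datasets`
# dispatch `map_nested` work over a joblib-spark cluster, e.g.
#   with parallel_backend("spark"):
#       outputs = map_nested(some_function, inputs, num_proc=2)
# where `some_function` and `inputs` are placeholders for the caller's own objects.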
| 276
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow's C++-level log spam before importing it
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
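# Hedged usage note: this diagnostics script is normally run on its own (for example
# `python print_env.py`; the filename is assumed) and its output pasted into bug reports.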
| 276
| 1
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
"""simple docstring"""
def __init__( self , a__ , a__=13 , a__=32 , a__=3 , a__=4 , a__=[10, 20, 30, 40] , a__=[2, 2, 3, 2] , a__=True , a__=True , a__=37 , a__="gelu" , a__=10 , a__=0.02 , a__=["stage2", "stage3", "stage4"] , a__=[2, 3, 4] , a__=None , ):
"""simple docstring"""
_lowerCamelCase : List[str] = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : List[str] = image_size
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Dict = num_stages
_lowerCamelCase : Dict = hidden_sizes
_lowerCamelCase : List[str] = depths
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Optional[Any] = use_labels
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Optional[int] = num_labels
_lowerCamelCase : Union[str, Any] = initializer_range
_lowerCamelCase : Any = out_features
_lowerCamelCase : Union[str, Any] = out_indices
_lowerCamelCase : Any = scope
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCamelCase : Optional[int] = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] , self.num_labels)
_lowerCamelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __snake_case ( self , a__ , a__ , a__):
"""simple docstring"""
_lowerCamelCase : Dict = ConvNextModel(config=a__)
model.to(a__)
model.eval()
_lowerCamelCase : Dict = model(a__)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __snake_case ( self , a__ , a__ , a__):
"""simple docstring"""
_lowerCamelCase : Tuple = ConvNextForImageClassification(a__)
model.to(a__)
model.eval()
_lowerCamelCase : List[str] = model(a__ , labels=a__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __snake_case ( self , a__ , a__ , a__):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ConvNextBackbone(config=a__)
model.to(a__)
model.eval()
_lowerCamelCase : List[str] = model(a__)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
_lowerCamelCase : str = None
_lowerCamelCase : Optional[Any] = ConvNextBackbone(config=a__)
model.to(a__)
model.eval()
_lowerCamelCase : List[Any] = model(a__)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = config_and_inputs
_lowerCamelCase : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __A ( lowerCamelCase__ ,lowerCamelCase__ ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[Any] = ConvNextModelTester(self)
_lowerCamelCase : Tuple = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37)
def __snake_case ( self):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self):
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''')
def __snake_case ( self):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''')
def __snake_case ( self):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''')
def __snake_case ( self):
"""simple docstring"""
pass
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(a__)
_lowerCamelCase : Dict = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : int = [*signature.parameters.keys()]
_lowerCamelCase : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__)
def __snake_case ( self):
"""simple docstring"""
def check_hidden_states_output(a__ , a__ , a__):
_lowerCamelCase : Any = model_class(a__)
model.to(a__)
model.eval()
with torch.no_grad():
_lowerCamelCase : Tuple = model(**self._prepare_for_class(a__ , a__))
_lowerCamelCase : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(a__) , expected_num_stages + 1)
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = True
check_hidden_states_output(a__ , a__ , a__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
check_hidden_states_output(a__ , a__ , a__)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__)
@slow
def __snake_case ( self):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[int] = ConvNextModel.from_pretrained(a__)
self.assertIsNotNone(a__)
def __UpperCAmelCase( ):
_lowerCamelCase : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __snake_case ( self):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''') if is_vision_available() else None
@slow
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Any = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''').to(a__)
_lowerCamelCase : List[str] = self.default_image_processor
_lowerCamelCase : Optional[Any] = prepare_img()
_lowerCamelCase : Any = image_processor(images=a__ , return_tensors='''pt''').to(a__)
# forward pass
with torch.no_grad():
_lowerCamelCase : Tuple = model(**a__)
# verify the logits
_lowerCamelCase : str = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , a__)
_lowerCamelCase : int = torch.tensor([-0.0260, -0.4739, 0.1911]).to(a__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4))
@require_torch
class __A ( unittest.TestCase ,lowerCamelCase__ ):
"""simple docstring"""
UpperCAmelCase__ = (ConvNextBackbone,) if is_torch_available() else ()
UpperCAmelCase__ = ConvNextConfig
UpperCAmelCase__ = False
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ConvNextModelTester(self)
| 613
|
import math


def jump_search(arr: list, x: int) -> int:
    """Search a sorted array for x in blocks of size sqrt(n); return its index or -1."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # Jump forward block by block until reaching the block that could contain x.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # Linear scan inside the located block.
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
    if res == -1:
        print('Number not found!')
    else:
        print(f'Number {x} is at index {res}')
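# Sanity check for the block-jump logic above:
#   jump_search([0, 1, 2, 4, 8, 16], 8) returns 4 (the index of 8)
#   jump_search([0, 1, 2, 4, 8, 16], 5) returns -1 (value not present)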
| 613
| 1
|
from __future__ import annotations


class IIRFilter:
    """N-order IIR filter implemented as a direct-form difference equation."""

    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set the denominator (a) and numerator (b) coefficients of the filter."""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f'Expected a_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(a_coeffs)}'
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f'Expected b_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(b_coeffs)}'
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Feed one sample through the filter and return the filtered output."""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift the histories by one sample and store the newest values at index 0.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
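# Hedged usage sketch: with "identity" coefficients an order-2 filter passes samples
# through unchanged, which is a quick way to sanity-check the difference equation:
#   fil = IIRFilter(2)
#   fil.set_coefficients([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
#   fil.process(0.5)  # -> 0.5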
| 176
|
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ : Optional[int] = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ (_a , unittest.TestCase ):
lowercase_ : Optional[Any] = DebertaVaTokenizer
lowercase_ : Any = DebertaVaTokenizerFast
lowercase_ : List[str] = True
lowercase_ : str = True
def A__ ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = DebertaVaTokenizer(__lowerCamelCase , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self : Dict , __lowerCamelCase : Any ):
"""simple docstring"""
lowerCAmelCase__ = '''this is a test'''
lowerCAmelCase__ = '''this is a test'''
return input_text, output_text
def A__ ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = '''<pad>'''
lowerCAmelCase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def A__ ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(__lowerCamelCase ) , 3_00_01 )
def A__ ( self : Tuple ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def A__ ( self : str ):
"""simple docstring"""
# fmt: off
lowerCAmelCase__ = ''' \tHeLLo!how \n Are yoU? '''
lowerCAmelCase__ = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowerCAmelCase__ = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase )
lowerCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def A__ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def A__ ( self : Dict ):
"""simple docstring"""
pass
def A__ ( self : str ):
"""simple docstring"""
# fmt: off
lowerCAmelCase__ = '''I was born in 92000, and this is falsé.'''
lowerCAmelCase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowerCAmelCase__ = DebertaVaTokenizer(__lowerCamelCase , split_by_punct=__lowerCamelCase )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = DebertaVaTokenizerFast(__lowerCamelCase , split_by_punct=__lowerCamelCase )
lowerCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def A__ ( self : Optional[int] ):
"""simple docstring"""
# fmt: off
lowerCAmelCase__ = '''I was born in 92000, and this is falsé.'''
lowerCAmelCase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowerCAmelCase__ = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
lowerCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def A__ ( self : Union[str, Any] ):
"""simple docstring"""
# fmt: off
lowerCAmelCase__ = '''I was born in 92000, and this is falsé.'''
lowerCAmelCase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowerCAmelCase__ = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
lowerCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def A__ ( self : Optional[int] ):
"""simple docstring"""
# fmt: off
lowerCAmelCase__ = '''I was born in 92000, and this is falsé.'''
lowerCAmelCase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowerCAmelCase__ = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
lowerCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def A__ ( self : Tuple ):
"""simple docstring"""
# fmt: off
lowerCAmelCase__ = ''' \tHeLLo!how \n Are yoU? '''
lowerCAmelCase__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowerCAmelCase__ = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
lowerCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def A__ ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = '''I was born in 92000, and this is falsé.'''
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
lowerCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
lowerCAmelCase__ = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = tokenizer.encode(__lowerCamelCase )
lowerCAmelCase__ = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def A__ ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = '''This is a test'''
lowerCAmelCase__ = [13, 1, 43_98, 25, 21, 12_89]
lowerCAmelCase__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowerCAmelCase__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowerCAmelCase__ = DebertaVaTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
lowerCAmelCase__ = DebertaVaTokenizerFast(__lowerCamelCase , keep_accents=__lowerCamelCase )
lowerCAmelCase__ = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
# fmt: off
lowerCAmelCase__ = '''I was born in 92000, and this is falsé.'''
lowerCAmelCase__ = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
lowerCAmelCase__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowerCAmelCase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowerCAmelCase__ = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = rust_tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def A__ ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = DebertaVaTokenizer(__lowerCamelCase )
lowerCAmelCase__ = tokenizer.encode('''sequence builders''' )
lowerCAmelCase__ = tokenizer.encode('''multi-sequence build''' )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __lowerCamelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __lowerCamelCase , )
@slow
def A__ ( self : str ):
"""simple docstring"""
# fmt: off
lowerCAmelCase__ = {'''input_ids''': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
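# Hedged usage sketch mirroring the behaviour exercised in the tests above. The
# checkpoint name "microsoft/deberta-v2-xlarge" is already referenced in this file;
# running the snippet needs `transformers`, `sentencepiece` and network access.
if __name__ == "__main__":
    from transformers import DebertaV2Tokenizer
    demo_tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge", split_by_punct=True)
    print(demo_tokenizer.tokenize("I was born in 92000, and this is falsé."))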
| 615
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
snake_case : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
snake_case : Any = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
snake_case : Tuple = {
"""google/electra-small-generator""": 5_12,
"""google/electra-base-generator""": 5_12,
"""google/electra-large-generator""": 5_12,
"""google/electra-small-discriminator""": 5_12,
"""google/electra-base-discriminator""": 5_12,
"""google/electra-large-discriminator""": 5_12,
}
snake_case : List[Any] = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class snake_case_ (__UpperCAmelCase ):
UpperCAmelCase__ : Dict = VOCAB_FILES_NAMES
UpperCAmelCase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[Any] = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Dict = ElectraTokenizer
def __init__( self :Tuple ,__snake_case :Any=None ,__snake_case :List[str]=None ,__snake_case :List[Any]=True ,__snake_case :str="[UNK]" ,__snake_case :Optional[Any]="[SEP]" ,__snake_case :Union[str, Any]="[PAD]" ,__snake_case :Optional[int]="[CLS]" ,__snake_case :Optional[Any]="[MASK]" ,__snake_case :Union[str, Any]=True ,__snake_case :Optional[int]=None ,**__snake_case :str ,) -> Optional[Any]:
super().__init__(
__snake_case ,tokenizer_file=__snake_case ,do_lower_case=__snake_case ,unk_token=__snake_case ,sep_token=__snake_case ,pad_token=__snake_case ,cls_token=__snake_case ,mask_token=__snake_case ,tokenize_chinese_chars=__snake_case ,strip_accents=__snake_case ,**__snake_case ,)
a__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,__snake_case ) != do_lower_case
or normalizer_state.get('strip_accents' ,__snake_case ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,__snake_case ) != tokenize_chinese_chars
):
a__ = getattr(__snake_case ,normalizer_state.pop('type' ) )
a__ = do_lower_case
a__ = strip_accents
a__ = tokenize_chinese_chars
a__ = normalizer_class(**__snake_case )
a__ = do_lower_case
def lowerCamelCase__( self :List[Any] ,__snake_case :Union[str, Any] ,__snake_case :Dict=None ) -> Optional[Any]:
a__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase__( self :int ,__snake_case :Optional[int] ,__snake_case :Optional[Any] = None ) -> Any:
a__ = [self.sep_token_id]
a__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__( self :Any ,__snake_case :Dict ,__snake_case :int = None ) -> Any:
a__ = self._tokenizer.model.save(__snake_case ,name=__snake_case )
return tuple(__snake_case )
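# Hedged usage sketch for the fast tokenizer defined above. The checkpoint name is
# taken from the pretrained maps earlier in this file; loading it requires the
# `transformers` package and network access, so treat this as illustrative only.
if __name__ == "__main__":
    from transformers import ElectraTokenizerFast
    demo_tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
    demo_enc = demo_tok("ELECTRA uses a replaced token detection objective.")
    print(demo_enc["input_ids"])
    print(demo_tok.convert_ids_to_tokens(demo_enc["input_ids"]))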
| 717
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    # Collect the learning rate at each step so the tests below can compare full schedules.
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    # Same as above, but also round-trips the scheduler state through disk halfway through.
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :Union[str, Any] ) -> int:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> str:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = AdamW(params=[w] ,lr=2E-1 ,weight_decay=0.0 )
for _ in range(1_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
def lowerCamelCase__( self :Tuple ) -> int:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = Adafactor(
params=[w] ,lr=1E-2 ,eps=(1E-30, 1E-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,betaa=__snake_case ,weight_decay=0.0 ,relative_step=__snake_case ,scale_parameter=__snake_case ,warmup_init=__snake_case ,)
for _ in range(10_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
@require_torch
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : str = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
UpperCAmelCase__ : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
UpperCAmelCase__ : Optional[Any] = 1_0
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :Any=None ) -> Optional[Any]:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case ,msg=__snake_case )
def lowerCamelCase__( self :Tuple ) -> List[Any]:
a__ = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
a__ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
a__ , a__ = data
a__ = scheduler_func(self.optimizer ,**__snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 )
a__ = unwrap_schedule(__snake_case ,self.num_steps )
self.assertListAlmostEqual(
__snake_case ,__snake_case ,tol=1E-2 ,msg=F'failed for {scheduler_func} in normal scheduler' ,)
a__ = scheduler_func(self.optimizer ,**__snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__snake_case ) # wrap to test picklability of the schedule
a__ = unwrap_and_save_reload_schedule(__snake_case ,self.num_steps )
self.assertListEqual(__snake_case ,__snake_case ,msg=F'failed for {scheduler_func} in save and reload' )
class LambdaScheduleWrapper:
    # Callable wrapper used above to check that wrapped LR lambdas survive save/reload.
    def __init__(self, fn):
        self.fn = fn
    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)
    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
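# Hedged sketch of how the schedules exercised above are used in practice; it needs
# torch, the tiny model and step counts are illustrative, and the loop omits the
# forward/backward pass for brevity.
if __name__ == "__main__":
    demo_model = nn.Linear(50, 50)
    demo_optimizer = AdamW(demo_model.parameters(), lr=1e-3)
    demo_scheduler = get_linear_schedule_with_warmup(demo_optimizer, num_warmup_steps=2, num_training_steps=10)
    for _ in range(10):
        demo_optimizer.step()
        demo_scheduler.step()
    print(demo_scheduler.get_last_lr())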
| 657
| 0
|
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name of a given date using Conway's Doomsday algorithm."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
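    # Worked example (hedged, date chosen for illustration): 1 January 2023 fell on
    # a Sunday, which the Doomsday computation above reproduces.
    print(get_week_day(2023, 1, 1))  # -> "Sunday"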
| 477
|
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowerCAmelCase = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__lowerCAmelCase = concatenate_datasets
__lowerCAmelCase = DownloadConfig
__lowerCAmelCase = DownloadManager
__lowerCAmelCase = DownloadMode
__lowerCAmelCase = DownloadConfig
__lowerCAmelCase = DownloadMode
__lowerCAmelCase = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
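# Hedged usage sketch of the public API re-exported above; the dataset name is
# illustrative and downloading it requires network access:
#   from datasets import load_dataset
#   dataset = load_dataset("glue", "mrpc", split="train")
#   print(dataset[0])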
| 585
| 0
|
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path
    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )
    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")
    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
lowerCamelCase__ = parser.parse_args()
main(args)
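# Example invocation (the script filename and paths are illustrative; the flags come
# from the argument parser defined above):
#   python bertarize.py --pruning_method topK --threshold 0.10 \
#       --model_name_or_path serialization_dir/fine_pruned_model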
| 40
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase__ = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
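# The two hooks above are collected automatically by pytest; report files are then
# produced with an invocation along these lines (report id and test path illustrative):
#   python -m pytest --make-reports=my_run tests/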
| 40
| 1
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_lowerCAmelCase = parse_flag_from_env("""RUN_SLOW""", default=False)
def lowercase ( _a ) -> Dict:
return unittest.skip("Test was skipped" )(snake_case__ )
def lowercase ( _a ) -> int:
return unittest.skipUnless(_run_slow_tests ,"test is slow" )(snake_case__ )
def lowercase ( _a ) -> Tuple:
return unittest.skipUnless(not torch.cuda.is_available() ,"test requires only a CPU" )(snake_case__ )
def lowercase ( _a ) -> Dict:
return unittest.skipUnless(torch.cuda.is_available() ,"test requires a GPU" )(snake_case__ )
def lowercase ( _a ) -> List[Any]:
return unittest.skipUnless(is_xpu_available() ,"test requires a XPU" )(snake_case__ )
def lowercase ( _a ) -> int:
return unittest.skipUnless(is_mps_available() ,"test requires a `mps` backend support in `torch`" )(snake_case__ )
def lowercase ( _a ) -> str:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() ,"test requires the Hugging Face suite" )(snake_case__ )
def lowercase ( _a ) -> Any:
return unittest.skipUnless(is_bnb_available() ,"test requires the bitsandbytes library" )(snake_case__ )
def lowercase ( _a ) -> Optional[int]:
return unittest.skipUnless(is_tpu_available() ,"test requires TPU" )(snake_case__ )
def lowercase ( _a ) -> Union[str, Any]:
return unittest.skipUnless(torch.cuda.device_count() == 1 ,"test requires a GPU" )(snake_case__ )
def lowercase ( _a ) -> List[Any]:
return unittest.skipUnless(torch.xpu.device_count() == 1 ,"test requires a XPU" )(snake_case__ )
def lowercase ( _a ) -> Optional[int]:
return unittest.skipUnless(torch.cuda.device_count() > 1 ,"test requires multiple GPUs" )(snake_case__ )
def lowercase ( _a ) -> Optional[int]:
return unittest.skipUnless(torch.xpu.device_count() > 1 ,"test requires multiple XPUs" )(snake_case__ )
def lowercase ( _a ) -> str:
return unittest.skipUnless(is_safetensors_available() ,"test requires safetensors" )(snake_case__ )
def lowercase ( _a ) -> Union[str, Any]:
return unittest.skipUnless(is_deepspeed_available() ,"test requires DeepSpeed" )(snake_case__ )
def lowercase ( _a ) -> Optional[int]:
return unittest.skipUnless(is_torch_version(">=" ,"1.12.0" ) ,"test requires torch version >= 1.12.0" )(snake_case__ )
def lowercase ( _a=None ,_a=None ) -> Optional[Any]:
if test_case is None:
return partial(snake_case__ ,version=snake_case__ )
return unittest.skipUnless(is_torch_version(">=" ,snake_case__ ) ,f"test requires torch version >= {version}" )(snake_case__ )
def lowercase ( _a ) -> str:
return unittest.skipUnless(is_tensorboard_available() ,"test requires Tensorboard" )(snake_case__ )
def lowercase ( _a ) -> List[str]:
return unittest.skipUnless(is_wandb_available() ,"test requires wandb" )(snake_case__ )
def lowercase ( _a ) -> str:
return unittest.skipUnless(is_comet_ml_available() ,"test requires comet_ml" )(snake_case__ )
_lowerCAmelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase ( _a ) -> List[Any]:
return unittest.skipUnless(
_atleast_one_tracker_available ,"test requires at least one tracker to be available and for `comet_ml` to not be installed" ,)(snake_case__ )
class UpperCAmelCase__ ( unittest.TestCase ):
snake_case_ = True
@classmethod
def snake_case_ ( cls ):
"""simple docstring"""
UpperCAmelCase_: Optional[int] = tempfile.mkdtemp()
@classmethod
def snake_case_ ( cls ):
"""simple docstring"""
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def snake_case_ ( self ):
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(lowerCAmelCase_ )
class UpperCAmelCase__ ( unittest.TestCase ):
def snake_case_ ( self ):
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCAmelCase__ ( unittest.TestCase ):
def snake_case_ ( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: List[Any] = mocks if isinstance(lowerCAmelCase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase ( _a ) -> List[str]:
UpperCAmelCase_: Dict = AcceleratorState()
UpperCAmelCase_: Optional[Any] = tensor[None].clone().to(state.device )
UpperCAmelCase_: int = gather(snake_case__ ).cpu()
UpperCAmelCase_: Dict = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] ,snake_case__ ):
return False
return True
class UpperCAmelCase__ :
def __init__( self , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: Dict = returncode
UpperCAmelCase_: Optional[int] = stdout
UpperCAmelCase_: List[Any] = stderr
async def lowercase ( _a ,_a ) -> str:
while True:
UpperCAmelCase_: int = await stream.readline()
if line:
callback(snake_case__ )
else:
break
async def lowercase ( _a ,_a=None ,_a=None ,_a=None ,_a=False ,_a=False ) -> _RunOutput:
if echo:
print("\nRunning: " ," ".join(snake_case__ ) )
UpperCAmelCase_: List[Any] = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=snake_case__ ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=snake_case__ ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCAmelCase_: Dict = []
UpperCAmelCase_: Any = []
def tee(_a ,_a ,_a ,_a="" ):
UpperCAmelCase_: List[str] = line.decode("utf-8" ).rstrip()
sink.append(snake_case__ )
if not quiet:
print(snake_case__ ,snake_case__ ,file=snake_case__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout ,lambda _a : tee(snake_case__ ,snake_case__ ,sys.stdout ,label="stdout:" ) ) ),
asyncio.create_task(_read_stream(p.stderr ,lambda _a : tee(snake_case__ ,snake_case__ ,sys.stderr ,label="stderr:" ) ) ),
] ,timeout=snake_case__ ,)
return _RunOutput(await p.wait() ,snake_case__ ,snake_case__ )
def lowercase ( _a ,_a=None ,_a=None ,_a=180 ,_a=False ,_a=True ) -> _RunOutput:
UpperCAmelCase_: Any = asyncio.get_event_loop()
UpperCAmelCase_: Optional[Any] = loop.run_until_complete(
_stream_subprocess(snake_case__ ,env=snake_case__ ,stdin=snake_case__ ,timeout=snake_case__ ,quiet=snake_case__ ,echo=snake_case__ ) )
UpperCAmelCase_: Union[str, Any] = " ".join(snake_case__ )
if result.returncode > 0:
UpperCAmelCase_: List[Any] = "\n".join(result.stderr )
raise RuntimeError(
f"\'{cmd_str}\' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}" )
return result
class UpperCAmelCase__ ( __lowerCAmelCase ):
pass
def lowercase ( _a ,_a=False ) -> Any:
try:
UpperCAmelCase_: Dict = subprocess.check_output(snake_case__ ,stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(snake_case__ ,"decode" ):
UpperCAmelCase_: Any = output.decode("utf-8" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(snake_case__ )}` failed with the following error:\n\n{e.output.decode()}" ) from e
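# Usage note (hedged): the slow-test gate above is driven by the RUN_SLOW environment
# variable parsed at the top of this file, so a full run typically looks like
#   RUN_SLOW=yes python -m pytest tests/
# where the test path is illustrative.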
| 137
|
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger('transformers.models.speecht5')
_SCREAMING_SNAKE_CASE = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
_SCREAMING_SNAKE_CASE = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
_SCREAMING_SNAKE_CASE = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
_SCREAMING_SNAKE_CASE = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
_SCREAMING_SNAKE_CASE = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
_SCREAMING_SNAKE_CASE = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
_SCREAMING_SNAKE_CASE = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
_SCREAMING_SNAKE_CASE = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
_SCREAMING_SNAKE_CASE = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
_SCREAMING_SNAKE_CASE = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_SCREAMING_SNAKE_CASE = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
_SCREAMING_SNAKE_CASE = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
_SCREAMING_SNAKE_CASE = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
_SCREAMING_SNAKE_CASE = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :Tuple , snake_case__ :List[Any] , snake_case__ :int , snake_case__ :Any) -> Union[str, Any]:
for attribute in key.split("""."""):
_A = getattr(snake_case__ , snake_case__)
if weight_type is not None:
_A = getattr(snake_case__ , snake_case__).shape
else:
_A = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''')
if weight_type == "weight":
_A = value
elif weight_type == "weight_g":
_A = value
elif weight_type == "weight_v":
_A = value
elif weight_type == "bias":
_A = value
elif weight_type == "running_mean":
_A = value
elif weight_type == "running_var":
_A = value
elif weight_type == "num_batches_tracked":
_A = value
else:
_A = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''')
def should_ignore(name, ignore_keys):
    # Returns True when a parameter name matches one of the (possibly wildcarded) ignore keys.
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def snake_case ( snake_case__ :Tuple , snake_case__ :List[str] , snake_case__ :Union[str, Any]) -> Union[str, Any]:
_A = []
if task == "s2t":
_A = hf_model.speechta.encoder.prenet.feature_encoder
_A = MAPPING_S2T
_A = IGNORE_KEYS_S2T
elif task == "t2s":
_A = None
_A = MAPPING_T2S
_A = IGNORE_KEYS_T2S
elif task == "s2s":
_A = hf_model.speechta.encoder.prenet.feature_encoder
_A = MAPPING_S2S
_A = IGNORE_KEYS_S2S
else:
raise ValueError(F'''Unsupported task: {task}''')
for name, value in fairseq_dict.items():
if should_ignore(snake_case__ , snake_case__):
logger.info(F'''{name} was ignored''')
continue
_A = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == """group""" , )
_A = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
_A , _A = key.split(""".*.""")
if prefix in name and suffix in name:
_A = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
_A = True
if "*" in mapped_key:
_A = name.split(snake_case__)[0].split(""".""")[-2]
_A = mapped_key.replace("""*""" , snake_case__)
if "weight_g" in name:
_A = """weight_g"""
elif "weight_v" in name:
_A = """weight_v"""
elif "bias" in name:
_A = """bias"""
elif "weight" in name:
_A = """weight"""
elif "running_mean" in name:
_A = """running_mean"""
elif "running_var" in name:
_A = """running_var"""
elif "num_batches_tracked" in name:
_A = """num_batches_tracked"""
else:
_A = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
continue
if not is_used:
unused_weights.append(snake_case__)
logger.warning(F'''Unused weights: {unused_weights}''')
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :Optional[Any] , snake_case__ :List[Any] , snake_case__ :Optional[int] , snake_case__ :Optional[int]) -> List[str]:
_A = full_name.split("""conv_layers.""")[-1]
_A = name.split(""".""")
_A = int(items[0])
_A = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''')
_A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''')
_A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''')
_A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''')
_A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
else:
unused_weights.append(snake_case__)
@torch.no_grad()
def snake_case ( snake_case__ :List[Any] , snake_case__ :Tuple , snake_case__ :Optional[int] , snake_case__ :List[Any]=None , snake_case__ :Tuple=None , snake_case__ :Dict=None , ) -> Optional[Any]:
if config_path is not None:
_A = SpeechTaConfig.from_pretrained(snake_case__)
else:
_A = SpeechTaConfig()
if task == "s2t":
_A = config.max_text_positions
_A = SpeechTaForSpeechToText(snake_case__)
elif task == "t2s":
_A = 1_876
_A = 600
_A = config.max_speech_positions
_A = SpeechTaForTextToSpeech(snake_case__)
elif task == "s2s":
_A = 1_876
_A = config.max_speech_positions
_A = SpeechTaForSpeechToSpeech(snake_case__)
else:
raise ValueError(F'''Unknown task name: {task}''')
if vocab_path:
_A = SpeechTaTokenizer(snake_case__ , model_max_length=config.max_text_positions)
# Mask token behaves like a normal word, i.e. include the space before it
_A = AddedToken("""<mask>""" , lstrip=snake_case__ , rstrip=snake_case__)
_A = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token})
tokenizer.add_tokens(["""<ctc_blank>"""])
_A = SpeechTaFeatureExtractor()
_A = SpeechTaProcessor(tokenizer=snake_case__ , feature_extractor=snake_case__)
processor.save_pretrained(snake_case__)
_A = torch.load(snake_case__)
recursively_load_weights(fairseq_checkpoint["""model"""] , snake_case__ , snake_case__)
model.save_pretrained(snake_case__)
if repo_id:
print("""Pushing to the hub...""")
processor.push_to_hub(snake_case__)
model.push_to_hub(snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
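# Example invocation (the script filename and all paths are illustrative; the flags
# come from the argument parser defined above):
#   python convert_speecht5_checkpoint.py --task t2s \
#       --checkpoint_path ./speecht5_tts.pt --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf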
| 401
| 0
|
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Optional[Any] = 0
@slow
def lowerCamelCase__ ( self : Dict ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
# Check that tokenizer_type ≠ model_type
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
with pytest.raises(UpperCamelCase_ ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowerCamelCase__ ( self : str ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowerCAmelCase : Dict = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def lowerCamelCase__ ( self : Optional[int] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
UpperCamelCase_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
lowerCAmelCase : Any = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowerCamelCase__ ( self : Tuple ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowerCAmelCase : Optional[Any] = TOKENIZER_MAPPING.values()
lowerCAmelCase : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Any ):
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=UpperCamelCase_ ) , UpperCamelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = '''Hello, world. How are you?'''
lowerCAmelCase : Optional[Any] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
# Check we can load the tokenizer config of an online model.
lowerCAmelCase : Any = get_tokenizer_config('''bert-base-cased''' )
lowerCAmelCase : Optional[int] = config.pop('''_commit_hash''' , UpperCamelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(UpperCamelCase_ , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowerCAmelCase : Union[str, Any] = get_tokenizer_config(UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Dict = get_tokenizer_config(UpperCamelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowerCamelCase__ ( self : Optional[int] ):
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = CustomTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCamelCase__ ( self : str ):
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
# Can register in two steps
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
            # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
            # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : Dict = BertTokenizerFast.from_pretrained(UpperCamelCase_ )
bert_tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : int = CustomTokenizerFast.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowerCamelCase__ ( self : Optional[int] ):
class snake_case_( a__ ):
__UpperCamelCase = False
class snake_case_( a__ ):
__UpperCamelCase = NewTokenizer
__UpperCamelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
# If remote code is not set, the default is to use local
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowerCamelCase__ ( self : str ):
with self.assertRaisesRegex(
UpperCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''bert-base''' )
def lowerCamelCase__ ( self : int ):
with self.assertRaisesRegex(
UpperCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , revision='''aaaaaa''' )
def lowerCamelCase__ ( self : Optional[int] ):
# Make sure we have cached the tokenizer.
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
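# A minimal, self-contained sketch of the AutoConfig/AutoTokenizer registration pattern exercised
# by the tests above. MyNewConfig/MyNewTokenizer are illustrative stand-ins (assumptions), not the
# CustomConfig/CustomTokenizer fixtures that the test suite defines elsewhere.
def _register_pattern_sketch():
    from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig
    class MyNewConfig(PretrainedConfig):
        model_type = "my-new-model"
    class MyNewTokenizer(BertTokenizer):
        pass
    AutoConfig.register("my-new-model", MyNewConfig)
    AutoTokenizer.register(MyNewConfig, slow_tokenizer_class=MyNewTokenizer)
    # After registration, AutoTokenizer.from_pretrained on a checkpoint whose config resolves to
    # MyNewConfig will instantiate MyNewTokenizer.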
| 704
|
"""simple docstring"""
class Node:
    def __init__( self , val ):
        self.val = val
        self.left = None
        self.right = None
    def insert( self , val ):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val
def inorder( root , res ):
    # Recursive in-order traversal: left subtree, current value, right subtree
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def tree_sort( arr ):
    # Build a BST from the input values
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse the BST in order.
    res = []
    inorder(root , res )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
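# Additional usage sketch (illustrative). Note that Node.insert above overwrites equal values
# instead of creating a new node, so duplicate inputs appear only once in the sorted output:
if __name__ == "__main__":
    assert tree_sort([3, 1, 3, 2]) == [1, 2, 3]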
| 637
| 0
|
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__magic_name__ = get_logger(__name__)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowerCamelCase = None ):
snake_case__ = (
os.path.join(lowerCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
snake_case__ = Extractor
def A_ ( self , lowerCamelCase ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
snake_case__ = os.path.abspath(lowerCamelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(lowerCamelCase ) )
def A_ ( self , lowerCamelCase , lowerCamelCase ):
return force_extract or (
not os.path.isfile(lowerCamelCase ) and not (os.path.isdir(lowerCamelCase ) and os.listdir(lowerCamelCase ))
)
def A_ ( self , lowerCamelCase , lowerCamelCase = False ):
snake_case__ = self.extractor.infer_extractor_format(lowerCamelCase )
if not extractor_format:
return input_path
snake_case__ = self._get_output_path(lowerCamelCase )
if self._do_extract(lowerCamelCase , lowerCamelCase ):
self.extractor.extract(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return output_path
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
@classmethod
@abstractmethod
def A_ ( cls , lowerCamelCase , **lowerCamelCase ):
...
@staticmethod
@abstractmethod
def A_ ( lowerCamelCase , lowerCamelCase ):
...
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase ):
_A : List[bytes] = []
@staticmethod
def A_ ( lowerCamelCase , lowerCamelCase ):
with open(lowerCamelCase , "rb" ) as f:
return f.read(lowerCamelCase )
@classmethod
def A_ ( cls , lowerCamelCase , lowerCamelCase = b"" ):
if not magic_number:
snake_case__ = max(len(lowerCamelCase ) for cls_magic_number in cls.magic_numbers )
try:
snake_case__ = cls.read_magic_number(lowerCamelCase , lowerCamelCase )
except OSError:
return False
return any(magic_number.startswith(lowerCamelCase ) for cls_magic_number in cls.magic_numbers )
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
@classmethod
def A_ ( cls , lowerCamelCase , **lowerCamelCase ):
return tarfile.is_tarfile(lowerCamelCase )
@staticmethod
def A_ ( lowerCamelCase , lowerCamelCase ):
def resolved(lowerCamelCase ) -> str:
return os.path.realpath(os.path.abspath(lowerCamelCase ) )
def badpath(lowerCamelCase , lowerCamelCase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(lowerCamelCase , lowerCamelCase ) ).startswith(lowerCamelCase )
def badlink(lowerCamelCase , lowerCamelCase ) -> bool:
# Links are interpreted relative to the directory containing the link
snake_case__ = resolved(os.path.join(lowerCamelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=lowerCamelCase )
snake_case__ = resolved(lowerCamelCase )
for finfo in members:
if badpath(finfo.name , lowerCamelCase ):
logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(lowerCamelCase , lowerCamelCase ):
logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(lowerCamelCase , lowerCamelCase ):
logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def A_ ( lowerCamelCase , lowerCamelCase ):
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
snake_case__ = tarfile.open(lowerCamelCase )
tar_file.extractall(lowerCamelCase , members=TarExtractor.safemembers(lowerCamelCase , lowerCamelCase ) )
tar_file.close()
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
_A : List[str] = [b'\x1F\x8B']
@staticmethod
def A_ ( lowerCamelCase , lowerCamelCase ):
with gzip.open(lowerCamelCase , "rb" ) as gzip_file:
with open(lowerCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCamelCase , lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
_A : Optional[Any] = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def A_ ( cls , lowerCamelCase , lowerCamelCase = b"" ):
if super().is_extractable(lowerCamelCase , magic_number=lowerCamelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowerCamelCase , "rb" ) as fp:
snake_case__ = _EndRecData(lowerCamelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
snake_case__ = fp.read(lowerCamelCase ) # CD is where we expect it to be
if len(lowerCamelCase ) == sizeCentralDir:
snake_case__ = struct.unpack(lowerCamelCase , lowerCamelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def A_ ( lowerCamelCase , lowerCamelCase ):
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
with zipfile.ZipFile(lowerCamelCase , "r" ) as zip_file:
zip_file.extractall(lowerCamelCase )
zip_file.close()
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
_A : Optional[int] = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def A_ ( lowerCamelCase , lowerCamelCase ):
with lzma.open(lowerCamelCase ) as compressed_file:
with open(lowerCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCamelCase , lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
_A : Any = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def A_ ( lowerCamelCase , lowerCamelCase ):
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
snake_case__ = rarfile.RarFile(lowerCamelCase )
rf.extractall(lowerCamelCase )
rf.close()
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
_A : Union[str, Any] = [b'\x28\xb5\x2F\xFD']
@staticmethod
def A_ ( lowerCamelCase , lowerCamelCase ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
snake_case__ = zstd.ZstdDecompressor()
with open(lowerCamelCase , "rb" ) as ifh, open(lowerCamelCase , "wb" ) as ofh:
dctx.copy_stream(lowerCamelCase , lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
_A : str = [b'\x42\x5A\x68']
@staticmethod
def A_ ( lowerCamelCase , lowerCamelCase ):
        with bz2.open(lowerCamelCase , "rb" ) as compressed_file:
with open(lowerCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCamelCase , lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
_A : Tuple = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def A_ ( lowerCamelCase , lowerCamelCase ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
        import py7zr
        os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
        with py7zr.SevenZipFile(lowerCamelCase , "r" ) as archive:
archive.extractall(lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
_A : Optional[int] = [b'\x04\x22\x4D\x18']
@staticmethod
def A_ ( lowerCamelCase , lowerCamelCase ):
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
        import lz4.frame
        with lz4.frame.open(lowerCamelCase , "rb" ) as compressed_file:
with open(lowerCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCamelCase , lowerCamelCase )
class _SCREAMING_SNAKE_CASE :
    # Put the zip file last, b/c it is possible it is wrongly detected as zip (I guess it means: as tar or gzip)
_A : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def A_ ( cls ):
return max(
len(lowerCamelCase )
for extractor in cls.extractors.values()
if issubclass(lowerCamelCase , lowerCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def A_ ( lowerCamelCase , lowerCamelCase ):
try:
return MagicNumberBaseExtractor.read_magic_number(lowerCamelCase , magic_number_length=lowerCamelCase )
except OSError:
return b""
@classmethod
def A_ ( cls , lowerCamelCase , lowerCamelCase = False ):
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=lowerCamelCase , )
snake_case__ = cls.infer_extractor_format(lowerCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def A_ ( cls , lowerCamelCase ): # <Added version="2.4.0"/>
snake_case__ = cls._get_magic_number_max_length()
snake_case__ = cls._read_magic_number(lowerCamelCase , lowerCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(lowerCamelCase , magic_number=lowerCamelCase ):
return extractor_format
@classmethod
def A_ ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = "deprecated" , ):
os.makedirs(os.path.dirname(lowerCamelCase ) , exist_ok=lowerCamelCase )
# Prevent parallel extractions
snake_case__ = str(Path(lowerCamelCase ).with_suffix(".lock" ) )
with FileLock(lowerCamelCase ):
shutil.rmtree(lowerCamelCase , ignore_errors=lowerCamelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(lowerCamelCase , lowerCamelCase ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=lowerCamelCase , )
snake_case__ = extractor if extractor != "deprecated" else extractor_format
else:
snake_case__ = cls.extractors[extractor_format]
return extractor.extract(lowerCamelCase , lowerCamelCase )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=lowerCamelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(lowerCamelCase ):
return extractor.extract(lowerCamelCase , lowerCamelCase )
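# For illustration, a stripped-down sketch of the magic-number based format detection that the
# extractor classes above rely on. The dict and helper names here are assumptions made for the
# example; only the byte signatures are taken from the class attributes defined above.
_MAGIC_NUMBER_SKETCH = {
    "gzip": [b"\x1F\x8B"],
    "bz2": [b"\x42\x5A\x68"],
    "xz": [b"\xFD\x37\x7A\x58\x5A\x00"],
    "zstd": [b"\x28\xb5\x2F\xFD"],
    "7z": [b"\x37\x7A\xBC\xAF\x27\x1C"],
    "lz4": [b"\x04\x22\x4D\x18"],
}
def _infer_format_sketch(path, max_length=6):
    # Read just enough leading bytes to compare against every signature above.
    with open(path, "rb") as f:
        header = f.read(max_length)
    for fmt, magic_numbers in _MAGIC_NUMBER_SKETCH.items():
        if any(header.startswith(magic_number) for magic_number in magic_numbers):
            return fmt
    return None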
| 276
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=4_00 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=True , lowerCamelCase=1 / 2_55 , lowerCamelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = num_channels
snake_case__ = min_resolution
snake_case__ = max_resolution
snake_case__ = do_resize
snake_case__ = size
snake_case__ = do_normalize
snake_case__ = image_mean
snake_case__ = image_std
snake_case__ = do_rescale
snake_case__ = rescale_factor
snake_case__ = do_pad
def A_ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def A_ ( self , lowerCamelCase , lowerCamelCase=False ):
if not batched:
snake_case__ = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
snake_case__ , snake_case__ = image.size
else:
snake_case__ , snake_case__ = image.shape[1], image.shape[2]
if w < h:
snake_case__ = int(self.size["shortest_edge"] * h / w )
snake_case__ = self.size["shortest_edge"]
elif w > h:
snake_case__ = self.size["shortest_edge"]
snake_case__ = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ = self.size["shortest_edge"]
snake_case__ = self.size["shortest_edge"]
else:
snake_case__ = []
for image in image_inputs:
snake_case__ , snake_case__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
snake_case__ = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase , unittest.TestCase ):
_A : List[str] = DetaImageProcessor if is_vision_available() else None
def A_ ( self ):
snake_case__ = DetaImageProcessingTester(self )
@property
def A_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self ):
snake_case__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
def A_ ( self ):
snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
def A_ ( self ):
pass
def A_ ( self ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
snake_case__ = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A_ ( self ):
# prepare image and target
snake_case__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ = json.loads(f.read() )
snake_case__ = {"image_id": 3_97_69, "annotations": target}
# encode them
snake_case__ = DetaImageProcessor()
snake_case__ = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
# verify pixel values
snake_case__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
snake_case__ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1e-4 ) )
# verify area
snake_case__ = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
snake_case__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
snake_case__ = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1e-3 ) )
# verify image_id
snake_case__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
snake_case__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
snake_case__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify orig_size
snake_case__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
snake_case__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def A_ ( self ):
# prepare image, target and masks_path
snake_case__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ = json.loads(f.read() )
snake_case__ = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
snake_case__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ = DetaImageProcessor(format="coco_panoptic" )
snake_case__ = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
# verify pixel values
snake_case__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
snake_case__ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1e-4 ) )
# verify area
snake_case__ = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
snake_case__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
snake_case__ = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1e-3 ) )
# verify image_id
snake_case__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
snake_case__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
snake_case__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify masks
snake_case__ = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
# verify orig_size
snake_case__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
snake_case__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 276
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __snake_case ( lowerCamelCase_ ):
lowerCAmelCase_ = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
lowerCAmelCase_ = "CIDAS/clipseg-rd64-refined"
lowerCAmelCase_ = "image_segmenter"
lowerCAmelCase_ = CLIPSegForImageSegmentation
lowerCAmelCase_ = ["image", "text"]
lowerCAmelCase_ = ["image"]
def __init__( self : List[Any] , *_lowercase : Dict , **_lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["""vision"""] )
super().__init__(*_lowercase , **_lowercase )
def __a ( self : Dict , _lowercase : "Image" , _lowercase : str ):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=_lowercase , return_tensors="""pt""" )
def __a ( self : Optional[int] , _lowercase : Union[str, Any] ):
"""simple docstring"""
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = self.model(**_lowercase ).logits
return logits
    def __a ( self : Tuple , outputs ):
        """simple docstring"""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 2_55).astype(np.uint8 ) )
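# Rough end-to-end sketch of what a call to the tool above amounts to, written directly against
# the transformers CLIPSeg classes. The checkpoint name is taken from the class attribute above;
# the helper name and the threshold-at-zero step mirror the decode method and are illustrative.
def _segment_sketch(image, label):
    from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
    inputs = processor(text=[label], images=[image], padding=True, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    mask = (logits.cpu().numpy() > 0).astype(np.uint8) * 255
    return Image.fromarray(mask)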
| 379
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __snake_case ( unittest.TestCase ):
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = torch.nn.Linear(10 , 10 )
SCREAMING_SNAKE_CASE__ = torch.optim.SGD(model.parameters() , 0.1 )
SCREAMING_SNAKE_CASE__ = Accelerator()
SCREAMING_SNAKE_CASE__ = accelerator.prepare(_lowercase )
try:
pickle.loads(pickle.dumps(_lowercase ) )
except Exception as e:
self.fail(f"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 379
| 1
|
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , __SCREAMING_SNAKE_CASE , )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = RobertaConfig
lowerCamelCase__ = '''roberta'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
super().__init__(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = RobertaEmbeddings(__SCREAMING_SNAKE_CASE )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , __SCREAMING_SNAKE_CASE , )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = RobertaConfig
lowerCamelCase__ = '''roberta'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
super().__init__(__SCREAMING_SNAKE_CASE )
snake_case__ : int = config.num_labels
snake_case__ : str = config.num_hidden_layers
snake_case__ : Optional[int] = DeeRobertaModel(__SCREAMING_SNAKE_CASE )
snake_case__ : int = nn.Dropout(config.hidden_dropout_prob )
snake_case__ : Dict = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=-1 , __SCREAMING_SNAKE_CASE=False , ):
snake_case__ : Tuple = self.num_layers
try:
snake_case__ : List[Any] = self.roberta(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , position_ids=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , inputs_embeds=__SCREAMING_SNAKE_CASE , )
snake_case__ : int = outputs[1]
snake_case__ : str = self.dropout(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = self.classifier(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case__ : Optional[int] = e.message
snake_case__ : Dict = e.exit_layer
snake_case__ : Optional[int] = outputs[0]
if not self.training:
snake_case__ : Any = entropy(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = []
snake_case__ : List[str] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case__ : str = MSELoss()
snake_case__ : List[str] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case__ : Optional[Any] = CrossEntropyLoss()
snake_case__ : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case__ : Optional[int] = []
for highway_exit in outputs[-1]:
snake_case__ : Optional[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(__SCREAMING_SNAKE_CASE )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case__ : Dict = MSELoss()
snake_case__ : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case__ : int = CrossEntropyLoss()
snake_case__ : Dict = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__SCREAMING_SNAKE_CASE )
if train_highway:
snake_case__ : Any = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case__ : Union[str, Any] = (loss,) + outputs
if not self.training:
snake_case__ : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case__ : Optional[int] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 38
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : Union[str, Any] = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Union[str, Any] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 116
| 0
|
'''simple docstring'''
from PIL import Image
def mean_threshold( image: Image ) -> Image:
    width , height = image.size
    mean = 0
    pixels = image.load()
    for i in range(height ):
        for j in range(width ):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(height ):
        for i in range(width ):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
| 720
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__lowerCAmelCase = "\\n\n"
__lowerCAmelCase = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
__lowerCAmelCase = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE ( datasets.Metric):
def UpperCAmelCase__ ( self : List[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
def UpperCAmelCase__ ( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : int = 16 , __UpperCamelCase : bool = True , __UpperCamelCase : Tuple=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
_UpperCAmelCase = "cuda"
else:
_UpperCAmelCase = "cuda" if torch.cuda.is_available() else "cpu"
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = model.to(__UpperCamelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained(__UpperCamelCase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__UpperCamelCase ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_UpperCAmelCase = model.config.max_length - 1
else:
_UpperCAmelCase = model.config.max_length
_UpperCAmelCase = tokenizer(
__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors="pt" , return_attention_mask=__UpperCamelCase , ).to(__UpperCamelCase )
_UpperCAmelCase = encodings["input_ids"]
_UpperCAmelCase = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_UpperCAmelCase = []
_UpperCAmelCase = CrossEntropyLoss(reduction="none" )
for start_index in logging.tqdm(range(0 , len(__UpperCamelCase ) , __UpperCamelCase ) ):
_UpperCAmelCase = min(start_index + batch_size , len(__UpperCamelCase ) )
_UpperCAmelCase = encoded_texts[start_index:end_index]
_UpperCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
_UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__UpperCamelCase )
_UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
_UpperCAmelCase = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(__UpperCamelCase ), attn_mask] , dim=1 )
_UpperCAmelCase = encoded_batch
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase ).logits
_UpperCAmelCase = out_logits[..., :-1, :].contiguous()
_UpperCAmelCase = labels[..., 1:].contiguous()
_UpperCAmelCase = attn_mask[..., 1:].contiguous()
            _UpperCAmelCase = torch.exp2(
(loss_fct(shift_logits.transpose(1 , 2 ) , __UpperCamelCase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__UpperCamelCase )}
| 129
| 0
|
"""simple docstring"""
def longest_common_substring( text1 : str , text2 : str ) -> str:
    """simple docstring"""
    if not (isinstance(text1 , str ) and isinstance(text2 , str )):
        raise ValueError("longest_common_substring() takes two strings for inputs" )
    text1_length = len(text1 )
    text2_length = len(text2 )
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1 )]
    ans_index = 0
    ans_length = 0
    for i in range(1 , text1_length + 1 ):
        for j in range(1 , text2_length + 1 ):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
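# Usage sketch for longest_common_substring (illustrative; expected results worked out by hand):
if __name__ == "__main__":
    assert longest_common_substring("abcdxyz", "xyzabcd") == "abcd"
    assert longest_common_substring("zxabcdezy", "yzabcdezx") == "abcdez"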
| 104
|
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase ( electron_conc : float, hole_conc : float, intrinsic_conc : float, ) -> tuple:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
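# Usage sketch (illustrative): with any two of the three concentrations known, the third follows
# from the mass-action relation n * p = n_i**2, which is what the branches above compute.
if __name__ == "__main__":
    assert _lowerCamelCase(electron_conc=0, hole_conc=18, intrinsic_conc=30) == ("electron_conc", 50.0)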
| 104
| 1
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __a :
__SCREAMING_SNAKE_CASE : int = BlenderbotConfig
__SCREAMING_SNAKE_CASE : str = {}
__SCREAMING_SNAKE_CASE : List[Any] = """gelu"""
def __init__( self : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any]=13 , lowercase__ : Union[str, Any]=7 , lowercase__ : Optional[int]=True , lowercase__ : Optional[int]=False , lowercase__ : List[str]=99 , lowercase__ : Optional[Any]=32 , lowercase__ : int=2 , lowercase__ : Any=4 , lowercase__ : Union[str, Any]=37 , lowercase__ : List[Any]=0.1 , lowercase__ : Optional[Any]=0.1 , lowercase__ : Tuple=20 , lowercase__ : Tuple=2 , lowercase__ : Optional[Any]=1 , lowercase__ : List[Any]=0 , ) ->Dict:
"""simple docstring"""
_lowercase = parent
_lowercase = batch_size
_lowercase = seq_length
_lowercase = is_training
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = eos_token_id
_lowercase = pad_token_id
_lowercase = bos_token_id
def _UpperCAmelCase ( self : Optional[int]) ->Dict:
"""simple docstring"""
_lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
_lowercase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
_lowercase = tf.concat([input_ids, eos_tensor] , axis=1)
_lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_lowercase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_lowercase = prepare_blenderbot_inputs_dict(lowercase__ , lowercase__ , lowercase__)
return config, inputs_dict
def _UpperCAmelCase ( self : Optional[Any] , lowercase__ : str , lowercase__ : List[Any]) ->Dict:
"""simple docstring"""
_lowercase = TFBlenderbotModel(config=lowercase__).get_decoder()
_lowercase = inputs_dict["""input_ids"""]
_lowercase = input_ids[:1, :]
_lowercase = inputs_dict["""attention_mask"""][:1, :]
_lowercase = inputs_dict["""head_mask"""]
_lowercase = 1
# first forward pass
_lowercase = model(lowercase__ , attention_mask=lowercase__ , head_mask=lowercase__ , use_cache=lowercase__)
_lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size)
        _lowercase = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.int8)
# append to next input_ids and
_lowercase = tf.concat([input_ids, next_tokens] , axis=-1)
_lowercase = tf.concat([attention_mask, next_attn_mask] , axis=-1)
_lowercase = model(lowercase__ , attention_mask=lowercase__)[0]
_lowercase = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
_lowercase = int(ids_tensor((1,) , output_from_past.shape[-1]))
_lowercase = output_from_no_past[:, -3:, random_slice_idx]
_lowercase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __a ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class __a ( unittest.TestCase ):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"
    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 704
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Count the distinct terms generated by a**b for 2 <= a, b <= n."""
    collect_powers = set()
    upper_limit = n + 1  # maximum limit (exclusive bound for both bases and exponents)
    for a in range(2, upper_limit):
        for b in range(2, upper_limit):
            current_power = a**b  # calculates the current power
            collect_powers.add(current_power)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
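# A quick, hand-checked sanity example for the `solution` function above (the expected
# count was verified manually for this small case and is only illustrative):
# for n = 5 the distinct values of a**b with 2 <= a, b <= 5 are
# {4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125}, i.e. 15 terms.
if __name__ == "__main__":
    assert solution(5) == 15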
| 572
| 0
|
def solution(limit: int = 50_000_000) -> int:
    """Count numbers below `limit` expressible as prime**2 + prime**3 + prime**4."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    # iterate in ascending order so the early `break`s below are valid
    primes = sorted(primes)

    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(F"{solution() = }")
| 108
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = AltDiffusionPipeline
lowercase__ : Dict = TEXT_TO_IMAGE_PARAMS
lowercase__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
def __SCREAMING_SNAKE_CASE ( self ) -> str:
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowerCAmelCase__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
lowerCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
lowerCAmelCase__ = CLIPTextModel(lowerCamelCase_ )
lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCAmelCase__ = 77
lowerCAmelCase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> List[str]:
if str(lowerCamelCase_ ).startswith('''mps''' ):
lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ )
else:
lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCAmelCase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = '''A photo of an astronaut'''
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 90
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput ( BaseOutput ):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput ( BaseOutput ):
        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 701
|
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase = {
"Salesforce/codegen-350M-mono": 2_048,
}
class a ( __magic_name__ ):
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ['''input_ids''', '''attention_mask''']
_snake_case = CodeGenTokenizer
def __init__( self : Any, SCREAMING_SNAKE_CASE_ : Dict=None, SCREAMING_SNAKE_CASE_ : Optional[Any]=None, SCREAMING_SNAKE_CASE_ : Optional[Any]=None, SCREAMING_SNAKE_CASE_ : Optional[int]="<|endoftext|>", SCREAMING_SNAKE_CASE_ : List[Any]="<|endoftext|>", SCREAMING_SNAKE_CASE_ : List[str]="<|endoftext|>", SCREAMING_SNAKE_CASE_ : int=False, **SCREAMING_SNAKE_CASE_ : Optional[int], ):
super().__init__(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
        if kwargs.pop('''add_bos_token''', False ):
            model_id : str = kwargs.pop('''name_or_path''', '''''' )
            raise ValueError(
                '''Currently GPT2\'s fast tokenizer does NOT support adding a BOS token.'''
'''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
F"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
F"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
'''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
''' so that the fast tokenizer works correctly.''' )
snake_case : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
snake_case : Optional[Any] = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop('''type''' ) )
snake_case : Union[str, Any] = add_prefix_space
snake_case : Optional[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
snake_case : int = add_prefix_space
def __snake_case ( self : List[Any], *SCREAMING_SNAKE_CASE_ : Tuple, **SCREAMING_SNAKE_CASE_ : int ):
snake_case : Dict = kwargs.get('''is_split_into_words''', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[str], *SCREAMING_SNAKE_CASE_ : str, **SCREAMING_SNAKE_CASE_ : Optional[int] ):
snake_case : Dict = kwargs.get('''is_split_into_words''', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Tuple, SCREAMING_SNAKE_CASE_ : str, SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
snake_case : Optional[int] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Dict, SCREAMING_SNAKE_CASE_ : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], SCREAMING_SNAKE_CASE_ : bool = False, SCREAMING_SNAKE_CASE_ : bool = None, SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None, **SCREAMING_SNAKE_CASE_ : Union[str, Any], ):
snake_case : Dict = super().decode(
token_ids=SCREAMING_SNAKE_CASE_, skip_special_tokens=SCREAMING_SNAKE_CASE_, clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
if truncate_before_pattern is not None and len(SCREAMING_SNAKE_CASE_ ) > 0:
snake_case : Optional[int] = self.truncate(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
return decoded_text
def __snake_case ( self : int, SCREAMING_SNAKE_CASE_ : Optional[Any], SCREAMING_SNAKE_CASE_ : Tuple ):
def find_re(SCREAMING_SNAKE_CASE_ : Optional[int], SCREAMING_SNAKE_CASE_ : Dict, SCREAMING_SNAKE_CASE_ : Optional[Any] ):
snake_case : Optional[Any] = pattern.search(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
return m.start() if m else -1
snake_case : Union[str, Any] = [re.compile(SCREAMING_SNAKE_CASE_, re.MULTILINE ) for pattern in truncate_before_pattern]
snake_case : Union[str, Any] = list(re.finditer('''^print''', SCREAMING_SNAKE_CASE_, re.MULTILINE ) )
if len(SCREAMING_SNAKE_CASE_ ) > 1:
snake_case : Tuple = completion[: prints[1].start()]
snake_case : List[str] = list(re.finditer('''^def''', SCREAMING_SNAKE_CASE_, re.MULTILINE ) )
if len(SCREAMING_SNAKE_CASE_ ) > 1:
snake_case : Optional[Any] = completion[: defs[1].start()]
snake_case : Tuple = 0
snake_case : List[Any] = [
pos for pos in [find_re(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) for terminal in terminals] if pos != -1
]
if len(SCREAMING_SNAKE_CASE_ ) > 0:
return completion[: min(SCREAMING_SNAKE_CASE_ )]
else:
return completion
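
# Illustrative usage sketch for the truncation logic above (the checkpoint name below is an
# assumption for the example, not taken from this file): `decode` accepts a
# `truncate_before_pattern` list of regexes and cuts the decoded completion off at the first
# line matching any of them, in addition to the built-in handling of repeated `print`/`def`
# statements implemented in `truncate`.
#
#   from transformers import CodeGenTokenizerFast
#   tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tokenizer("def add(a, b):\n    return a + b\nprint(add(1, 2))").input_ids
#   text = tokenizer.decode(ids, truncate_before_pattern=["^print"])  # drops the print line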
| 555
| 0
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class A ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : int , *_UpperCamelCase : str , **_UpperCamelCase : Optional[Any]):
super().__init__(*_UpperCamelCase , **_UpperCamelCase)
requires_backends(self , "vision")
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
def __call__( self : Any , _UpperCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCamelCase : Optional[Any]):
return super().__call__(_UpperCamelCase , **_UpperCamelCase)
def UpperCAmelCase__ ( self : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int]=None):
_lowercase: Tuple = load_image(_UpperCamelCase)
if prompt is not None:
if not isinstance(_UpperCamelCase , _UpperCamelCase):
raise ValueError(
f"Received an invalid text input, got - {type(_UpperCamelCase)} - but expected a single string. "
"Note also that one single text can be provided for conditional image to text generation.")
_lowercase: Dict = self.model.config.model_type
if model_type == "git":
_lowercase: Any = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework)
_lowercase: Dict = self.tokenizer(text=_UpperCamelCase , add_special_tokens=_UpperCamelCase).input_ids
_lowercase: int = [self.tokenizer.cls_token_id] + input_ids
_lowercase: int = torch.tensor(_UpperCamelCase).unsqueeze(0)
model_inputs.update({"input_ids": input_ids})
elif model_type == "pix2struct":
_lowercase: str = self.image_processor(images=_UpperCamelCase , header_text=_UpperCamelCase , return_tensors=self.framework)
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_lowercase: str = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework)
_lowercase: Optional[Any] = self.tokenizer(_UpperCamelCase , return_tensors=self.framework)
model_inputs.update(_UpperCamelCase)
else:
raise ValueError(f"Model type {model_type} does not support conditional text generation")
else:
_lowercase: Union[str, Any] = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework)
if self.model.config.model_type == "git" and prompt is None:
_lowercase: Dict = None
return model_inputs
def UpperCAmelCase__ ( self : Union[str, Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple=None):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"] , _UpperCamelCase)
and all(x is None for x in model_inputs["input_ids"])
):
_lowercase: Union[str, Any] = None
if generate_kwargs is None:
_lowercase: str = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_lowercase: Optional[int] = model_inputs.pop(self.model.main_input_name)
_lowercase: List[Any] = self.model.generate(_UpperCamelCase , **_UpperCamelCase , **_UpperCamelCase)
return model_outputs
def UpperCAmelCase__ ( self : Optional[int] , _UpperCamelCase : Any):
_lowercase: Tuple = []
for output_ids in model_outputs:
_lowercase: Union[str, Any] = {
"generated_text": self.tokenizer.decode(
_UpperCamelCase , skip_special_tokens=_UpperCamelCase , )
}
records.append(_UpperCamelCase)
return records
| 226
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
# General docstring
_SCREAMING_SNAKE_CASE : Any = 'RegNetConfig'
# Base docstring
_SCREAMING_SNAKE_CASE : Optional[int] = 'facebook/regnet-y-040'
_SCREAMING_SNAKE_CASE : List[str] = [1, 1_088, 7, 7]
# Image classification docstring
_SCREAMING_SNAKE_CASE : Union[str, Any] = 'facebook/regnet-y-040'
_SCREAMING_SNAKE_CASE : List[Any] = 'tabby, tabby cat'
_SCREAMING_SNAKE_CASE : Dict = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int = 3 , _UpperCamelCase : int = 1 , _UpperCamelCase : int = 1 , _UpperCamelCase : Optional[str] = "relu" , ):
super().__init__()
_lowercase: Any = nn.Convad(
_UpperCamelCase , _UpperCamelCase , kernel_size=_UpperCamelCase , stride=_UpperCamelCase , padding=kernel_size // 2 , groups=_UpperCamelCase , bias=_UpperCamelCase , )
_lowercase: Optional[Any] = nn.BatchNormad(_UpperCamelCase)
_lowercase: Dict = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCAmelCase__ ( self : List[str] , _UpperCamelCase : Dict):
_lowercase: Any = self.convolution(_UpperCamelCase)
_lowercase: int = self.normalization(_UpperCamelCase)
_lowercase: Tuple = self.activation(_UpperCamelCase)
return hidden_state
class A ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _UpperCamelCase : RegNetConfig):
super().__init__()
_lowercase: Optional[int] = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act)
_lowercase: List[str] = config.num_channels
def UpperCAmelCase__ ( self : Optional[int] , _UpperCamelCase : List[Any]):
_lowercase: int = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
_lowercase: List[str] = self.embedder(_UpperCamelCase)
return hidden_state
class A ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int = 2):
super().__init__()
_lowercase: Dict = nn.Convad(_UpperCamelCase , _UpperCamelCase , kernel_size=1 , stride=_UpperCamelCase , bias=_UpperCamelCase)
_lowercase: Optional[int] = nn.BatchNormad(_UpperCamelCase)
def UpperCAmelCase__ ( self : Any , _UpperCamelCase : Tensor):
_lowercase: Union[str, Any] = self.convolution(_UpperCamelCase)
_lowercase: Union[str, Any] = self.normalization(_UpperCamelCase)
return hidden_state
class A ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , _UpperCamelCase : int , _UpperCamelCase : int):
super().__init__()
_lowercase: List[Any] = nn.AdaptiveAvgPoolad((1, 1))
_lowercase: Dict = nn.Sequential(
nn.Convad(_UpperCamelCase , _UpperCamelCase , kernel_size=1) , nn.ReLU() , nn.Convad(_UpperCamelCase , _UpperCamelCase , kernel_size=1) , nn.Sigmoid() , )
def UpperCAmelCase__ ( self : str , _UpperCamelCase : Any):
# b c h w -> b c 1 1
_lowercase: Tuple = self.pooler(_UpperCamelCase)
_lowercase: Any = self.attention(_UpperCamelCase)
_lowercase: List[str] = hidden_state * attention
return hidden_state
class A ( nn.Module ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : RegNetConfig , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int = 1):
super().__init__()
_lowercase: Union[str, Any] = in_channels != out_channels or stride != 1
_lowercase: Any = max(1 , out_channels // config.groups_width)
_lowercase: Dict = (
RegNetShortCut(_UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase) if should_apply_shortcut else nn.Identity()
)
_lowercase: Any = nn.Sequential(
RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase , groups=_UpperCamelCase , activation=config.hidden_act) , RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , kernel_size=1 , activation=_UpperCamelCase) , )
_lowercase: Optional[Any] = ACTaFN[config.hidden_act]
def UpperCAmelCase__ ( self : Dict , _UpperCamelCase : Optional[int]):
_lowercase: Union[str, Any] = hidden_state
_lowercase: Any = self.layer(_UpperCamelCase)
_lowercase: Union[str, Any] = self.shortcut(_UpperCamelCase)
hidden_state += residual
_lowercase: Any = self.activation(_UpperCamelCase)
return hidden_state
class A ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCamelCase : RegNetConfig , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int = 1):
super().__init__()
_lowercase: Tuple = in_channels != out_channels or stride != 1
_lowercase: Tuple = max(1 , out_channels // config.groups_width)
_lowercase: Dict = (
RegNetShortCut(_UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase) if should_apply_shortcut else nn.Identity()
)
_lowercase: Union[str, Any] = nn.Sequential(
RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase , groups=_UpperCamelCase , activation=config.hidden_act) , RegNetSELayer(_UpperCamelCase , reduced_channels=int(round(in_channels / 4))) , RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , kernel_size=1 , activation=_UpperCamelCase) , )
_lowercase: str = ACTaFN[config.hidden_act]
def UpperCAmelCase__ ( self : Optional[int] , _UpperCamelCase : Optional[int]):
_lowercase: Optional[Any] = hidden_state
_lowercase: Tuple = self.layer(_UpperCamelCase)
_lowercase: Optional[Any] = self.shortcut(_UpperCamelCase)
hidden_state += residual
_lowercase: int = self.activation(_UpperCamelCase)
return hidden_state
class A ( nn.Module ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : RegNetConfig , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , ):
super().__init__()
_lowercase: Optional[int] = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
_lowercase: Dict = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase , ) , *[layer(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) for _ in range(depth - 1)] , )
def UpperCAmelCase__ ( self : int , _UpperCamelCase : List[str]):
_lowercase: str = self.layers(_UpperCamelCase)
return hidden_state
class A ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , _UpperCamelCase : RegNetConfig):
super().__init__()
_lowercase: Dict = nn.ModuleList([])
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_UpperCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
_lowercase: Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(_UpperCamelCase , config.depths[1:]):
self.stages.append(RegNetStage(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , depth=_UpperCamelCase))
def UpperCAmelCase__ ( self : Optional[int] , _UpperCamelCase : Tensor , _UpperCamelCase : bool = False , _UpperCamelCase : bool = True):
_lowercase: int = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowercase: List[Any] = hidden_states + (hidden_state,)
_lowercase: int = stage_module(_UpperCamelCase)
if output_hidden_states:
_lowercase: Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCamelCase , hidden_states=_UpperCamelCase)
class A ( lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase : int = RegNetConfig
lowerCamelCase : Any = """regnet"""
lowerCamelCase : int = """pixel_values"""
lowerCamelCase : Tuple = True
def UpperCAmelCase__ ( self : str , _UpperCamelCase : Dict):
if isinstance(_UpperCamelCase , nn.Convad):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu")
elif isinstance(_UpperCamelCase , (nn.BatchNormad, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def UpperCAmelCase__ ( self : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict=False):
if isinstance(_UpperCamelCase , _UpperCamelCase):
_lowercase: Dict = value
_SCREAMING_SNAKE_CASE : Optional[int] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
_SCREAMING_SNAKE_CASE : List[Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , lowerCamelCase_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class A ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : Tuple):
super().__init__(_UpperCamelCase)
_lowercase: List[Any] = config
_lowercase: List[Any] = RegNetEmbeddings(_UpperCamelCase)
_lowercase: Optional[int] = RegNetEncoder(_UpperCamelCase)
_lowercase: str = nn.AdaptiveAvgPoolad((1, 1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCamelCase)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase__ ( self : Tuple , _UpperCamelCase : Tensor , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : Optional[bool] = None):
_lowercase: Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowercase: List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_lowercase: Optional[int] = self.embedder(_UpperCamelCase)
_lowercase: Union[str, Any] = self.encoder(
_UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase)
_lowercase: Optional[Any] = encoder_outputs[0]
_lowercase: List[Any] = self.pooler(_UpperCamelCase)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_UpperCamelCase , pooler_output=_UpperCamelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , lowerCamelCase_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class A ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCamelCase : Tuple):
super().__init__(_UpperCamelCase)
_lowercase: List[Any] = config.num_labels
_lowercase: Any = RegNetModel(_UpperCamelCase)
# classification head
_lowercase: List[str] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCamelCase)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase__ ( self : List[str] , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[torch.LongTensor] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : Optional[bool] = None , ):
_lowercase: str = return_dict if return_dict is not None else self.config.use_return_dict
_lowercase: int = self.regnet(_UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase)
_lowercase: Dict = outputs.pooler_output if return_dict else outputs[1]
_lowercase: Union[str, Any] = self.classifier(_UpperCamelCase)
_lowercase: Any = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_lowercase: Tuple = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_lowercase: Union[str, Any] = "single_label_classification"
else:
_lowercase: Optional[Any] = "multi_label_classification"
if self.config.problem_type == "regression":
_lowercase: Dict = MSELoss()
if self.num_labels == 1:
_lowercase: Union[str, Any] = loss_fct(logits.squeeze() , labels.squeeze())
else:
_lowercase: int = loss_fct(_UpperCamelCase , _UpperCamelCase)
elif self.config.problem_type == "single_label_classification":
_lowercase: Dict = CrossEntropyLoss()
_lowercase: List[str] = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
_lowercase: List[str] = BCEWithLogitsLoss()
_lowercase: Union[str, Any] = loss_fct(_UpperCamelCase , _UpperCamelCase)
if not return_dict:
_lowercase: Any = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_UpperCamelCase , logits=_UpperCamelCase , hidden_states=outputs.hidden_states)
| 226
| 1
|
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """
    Calculate the turn around time of each process with
    Highest Response Ratio Next (HRRN) scheduling.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Jump the current time forward if no arrived process is pending.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        # Saves the current (best) response ratio.
        response_ratio = 0
        # Index showing the location of the process being performed.
        loc = 0
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                # response ratio = (waiting time + burst time) / burst time
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the waiting time of each process."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = 5
_lowerCamelCase : str = ["""A""", """B""", """C""", """D""", """E"""]
_lowerCamelCase : Optional[int] = [1, 2, 3, 4, 5]
_lowerCamelCase : Tuple = [1, 2, 3, 4, 5]
_lowerCamelCase : Union[str, Any] = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
_lowerCamelCase : Dict = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
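# Worked illustration of the HRRN selection rule implemented above:
# response_ratio = (waiting_time + burst_time) / burst_time, recomputed for every arrived,
# unfinished process each time the CPU becomes free; the largest ratio wins.
# With the sample data above, at time 1 only process A (burst 1) has arrived, so it runs first;
# at time 2, process B (arrived at 2, burst 2) has ratio (0 + 2) / 2 = 1.0 and is chosen next.
# A short job that has waited long sees its ratio grow fastest, which is what prevents starvation.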
| 704
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCamelCase_ :
'''simple docstring'''
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10,
                 num_channels=3, min_size=32 * 4, max_size=32 * 6, num_labels=4, mask_feature_size=32):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
UpperCAmelCase__)
A__ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase__)
A__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase__) > 0.5
).float()
A__ = (torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase__) > 0.5).long()
A__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[Any]:
'''simple docstring'''
A__ , A__ , A__ , A__ , A__ = self.prepare_config_and_inputs()
A__ = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]) ->int:
'''simple docstring'''
A__ = output.encoder_hidden_states
A__ = output.pixel_decoder_hidden_states
A__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCAmelCase__) , len(config.backbone_config.depths))
self.parent.assertTrue(len(UpperCAmelCase__) , len(config.backbone_config.depths))
self.parent.assertTrue(len(UpperCAmelCase__) , config.decoder_config.decoder_layers)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any]=False) ->Optional[int]:
'''simple docstring'''
with torch.no_grad():
A__ = MaskFormerModel(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
A__ = model(pixel_values=UpperCAmelCase__ , pixel_mask=UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__)
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : int) ->List[Any]:
'''simple docstring'''
A__ = MaskFormerForInstanceSegmentation(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
def comm_check_on_output(UpperCAmelCase__ : str):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
A__ = model(pixel_values=UpperCAmelCase__ , pixel_mask=UpperCAmelCase__)
A__ = model(UpperCAmelCase__)
comm_check_on_output(UpperCAmelCase__)
A__ = model(
pixel_values=UpperCAmelCase__ , pixel_mask=UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__)
comm_check_on_output(UpperCAmelCase__)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCAmelCase__ = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
A__ = MaskFormerModelTester(self)
A__ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase__ , **UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Any:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCAmelCase__)
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''')
def SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''')
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer is not a generative model''')
def SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''')
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCAmelCase__)
A__ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
A__ = MaskFormerModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
A__ = (self.model_tester.min_size,) * 2
A__ = {
'''pixel_values''': torch.randn((2, 3, *size) , device=UpperCAmelCase__),
'''mask_labels''': torch.randn((2, 10, *size) , device=UpperCAmelCase__),
'''class_labels''': torch.zeros(2 , 10 , device=UpperCAmelCase__).long(),
}
A__ = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(UpperCAmelCase__)
A__ = model(**UpperCAmelCase__)
self.assertTrue(outputs.loss is not None)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase__ , **UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCAmelCase__).to(UpperCAmelCase__)
A__ = model(**UpperCAmelCase__ , output_attentions=UpperCAmelCase__)
self.assertTrue(outputs.attentions is not None)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
A__ = self.all_model_classes[1]
A__ , A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
A__ = model_class(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.train()
A__ = model(UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : int) ->str:
'''simple docstring'''
A__ = self.all_model_classes[1]
A__ , A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
A__ = True
A__ = True
A__ = model_class(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.train()
A__ = model(UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__)
A__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
A__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
A__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
A__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCAmelCase__)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
_lowerCamelCase : Tuple = 1E-4
def prepare_img():
"""simple docstring"""
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''')
if is_vision_available()
else None
)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
A__ = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''').to(UpperCAmelCase__)
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').to(UpperCAmelCase__)
A__ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(UpperCAmelCase__ , (1, 3, 800, 1_088))
with torch.no_grad():
A__ = model(**UpperCAmelCase__)
A__ = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]).to(UpperCAmelCase__)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__))
A__ = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]).to(UpperCAmelCase__)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__))
A__ = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]).to(UpperCAmelCase__)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__))
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
'''simple docstring'''
A__ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
.to(UpperCAmelCase__)
.eval()
)
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').to(UpperCAmelCase__)
A__ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(UpperCAmelCase__ , (1, 3, 800, 1_088))
with torch.no_grad():
A__ = model(**UpperCAmelCase__)
# masks_queries_logits
A__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
A__ = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
A__ = torch.tensor(UpperCAmelCase__).to(UpperCAmelCase__)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__))
# class_queries_logits
A__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
A__ = torch.tensor(
[
[1.65_12e00, -5.25_72e00, -3.35_19e00],
[3.61_69e-02, -5.90_25e00, -2.93_13e00],
[1.07_66e-04, -7.76_30e00, -5.12_63e00],
]).to(UpperCAmelCase__)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__))
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
'''simple docstring'''
A__ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''')
.to(UpperCAmelCase__)
.eval()
)
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').to(UpperCAmelCase__)
A__ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(UpperCAmelCase__ , (1, 3, 800, 1_088))
with torch.no_grad():
A__ = model(**UpperCAmelCase__)
# masks_queries_logits
A__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
A__ = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
A__ = torch.tensor(UpperCAmelCase__).to(UpperCAmelCase__)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__))
# class_queries_logits
A__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
A__ = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]).to(UpperCAmelCase__)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__))
def SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
'''simple docstring'''
A__ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
.to(UpperCAmelCase__)
.eval()
)
A__ = self.default_image_processor
A__ = image_processor(
[np.zeros((3, 800, 1_333)), np.zeros((3, 800, 1_333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors='''pt''' , )
A__ = inputs['''pixel_values'''].to(UpperCAmelCase__)
A__ = [el.to(UpperCAmelCase__) for el in inputs['''mask_labels''']]
A__ = [el.to(UpperCAmelCase__) for el in inputs['''class_labels''']]
with torch.no_grad():
A__ = model(**UpperCAmelCase__)
self.assertTrue(outputs.loss is not None)
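# --- Illustrative usage (hedged sketch, not part of the test suite above) ---
# A minimal example of running the same checkpoint for semantic-segmentation inference.
# It assumes the standard `transformers` API (in particular
# `MaskFormerImageProcessor.post_process_semantic_segmentation`) and reuses the COCO
# fixture image referenced by the tests.
from PIL import Image
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

def run_maskformer_semantic_inference(image_path="./tests/fixtures/tests_samples/COCO/000000039769.png"):
    processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
    model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
    image = Image.open(image_path)
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # resize predicted masks back to the original (height, width) and take per-pixel class ids
    return processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]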
| 177
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase_ ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_lowercase = KandinskyVaaImgaImgPipeline
_lowercase = ['image_embeds', 'negative_image_embeds', 'image']
_lowercase = [
'image_embeds',
'negative_image_embeds',
'image',
]
_lowercase = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_lowercase = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 100
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Dict ={
'in_channels': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
SCREAMING_SNAKE_CASE_ : str =UNetaDConditionModel(**__UpperCAmelCase )
return model
@property
def __lowerCamelCase ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : str =VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : int =self.dummy_unet
SCREAMING_SNAKE_CASE_ : int =self.dummy_movq
SCREAMING_SNAKE_CASE_ : Tuple ={
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
SCREAMING_SNAKE_CASE_ : int =DDIMScheduler(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] ={
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
SCREAMING_SNAKE_CASE_ : str =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__UpperCAmelCase )
# create init_image
SCREAMING_SNAKE_CASE_ : Optional[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE_ : Optional[int] =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('RGB' ).resize((256, 256) )
if str(__UpperCAmelCase ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ : Dict =torch.manual_seed(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] ={
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : str ='cpu'
SCREAMING_SNAKE_CASE_ : Tuple =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.pipeline_class(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict =pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] =pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE_ : str =output.images
SCREAMING_SNAKE_CASE_ : List[Any] =pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
SCREAMING_SNAKE_CASE_ : Tuple =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ : Tuple =np.array(
[0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : List[str] =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_img2img_frog.npy' )
SCREAMING_SNAKE_CASE_ : Dict =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
SCREAMING_SNAKE_CASE_ : Optional[Any] ='A red cartoon frog, 4k'
SCREAMING_SNAKE_CASE_ : List[Any] =KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =KandinskyVaaImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ : int =pipeline.to(__UpperCAmelCase )
pipeline.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : int =torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] =pipe_prior(
__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
SCREAMING_SNAKE_CASE_ : Optional[int] =pipeline(
image=__UpperCAmelCase , image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
SCREAMING_SNAKE_CASE_ : List[Any] =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
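# --- Illustrative usage (hedged sketch, not part of the tests above) ---
# The same prior -> img2img workflow outside the test harness. It assumes the public
# diffusers classes are KandinskyV22PriorPipeline / KandinskyV22Img2ImgPipeline (the
# pipelines the aliases above stand in for) and that a CUDA device is available.
import torch
from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
from diffusers.utils import load_image

def kandinsky_img2img_demo(prompt="A red cartoon frog, 4k"):
    prior = KandinskyV22PriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
    ).to("cuda")
    decoder = KandinskyV22Img2ImgPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
    ).to("cuda")
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
    )
    image_embeds, negative_image_embeds = prior(prompt, negative_prompt="").to_tuple()
    result = decoder(
        image=init_image,
        image_embeds=image_embeds,
        negative_image_embeds=negative_image_embeds,
        height=768,
        width=768,
        strength=0.2,
    )
    return result.images[0]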
| 220
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = 'Hello, World!'
__SCREAMING_SNAKE_CASE = 'en_XX'
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : str ,lowerCAmelCase_ : str ,lowerCAmelCase_ : bool ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] =Path('data_bin' )
SCREAMING_SNAKE_CASE_ : Any =FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(lowerCAmelCase_ ).parent ) ,checkpoint_file=Path(lowerCAmelCase_ ).name ,_name='xmod_base' ,arch='xmod_base' ,task='multilingual_masked_lm' ,data_name_or_path=str(lowerCAmelCase_ ) ,bpe='sentencepiece' ,sentencepiece_model=str(Path(lowerCAmelCase_ ).parent / 'sentencepiece.bpe.model' ) ,src_dict=str(data_dir / 'dict.txt' ) ,)
xmod.eval() # disable dropout
print(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] =xmod.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE_ : Union[str, Any] =XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings ,hidden_size=xmod.cfg.model.encoder_embed_dim ,num_hidden_layers=xmod.cfg.model.encoder_layers ,num_attention_heads=xmod.cfg.model.encoder_attention_heads ,intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1e-5 ,pre_norm=xmod.cfg.model.encoder_normalize_before ,adapter_reduction_factor=getattr(xmod.cfg.model ,'bottleneck' ,2 ) ,adapter_layer_norm=xmod.cfg.model.adapter_layer_norm ,adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm ,ln_before_adapter=xmod.cfg.model.ln_before_adapter ,languages=xmod.cfg.model.languages ,)
if classification_head:
SCREAMING_SNAKE_CASE_ : Any =xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' ,lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple =XmodForSequenceClassification(lowerCAmelCase_ ) if classification_head else XmodForMaskedLM(lowerCAmelCase_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE_ : Any =xmod_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE_ : Dict =xmod_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
SCREAMING_SNAKE_CASE_ : Dict =xmod_sent_encoder.layernorm_embedding.weight
SCREAMING_SNAKE_CASE_ : Tuple =xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE_ : Dict =model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE_ : List[Any] =xmod_sent_encoder.layers[i]
# self attention
SCREAMING_SNAKE_CASE_ : int =layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
SCREAMING_SNAKE_CASE_ : Tuple =xmod_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE_ : int =xmod_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE_ : Any =xmod_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE_ : Optional[int] =xmod_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE_ : Union[str, Any] =xmod_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE_ : int =xmod_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE_ : Dict =layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
SCREAMING_SNAKE_CASE_ : Optional[Any] =xmod_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE_ : Tuple =xmod_layer.self_attn.out_proj.bias
SCREAMING_SNAKE_CASE_ : Any =xmod_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE_ : Optional[Any] =xmod_layer.self_attn_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE_ : Optional[int] =layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
SCREAMING_SNAKE_CASE_ : int =xmod_layer.fca.weight
SCREAMING_SNAKE_CASE_ : List[Any] =xmod_layer.fca.bias
# output
SCREAMING_SNAKE_CASE_ : Union[str, Any] =layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
SCREAMING_SNAKE_CASE_ : Any =xmod_layer.fca.weight
SCREAMING_SNAKE_CASE_ : Dict =xmod_layer.fca.bias
SCREAMING_SNAKE_CASE_ : str =xmod_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE_ : List[str] =xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
SCREAMING_SNAKE_CASE_ : Dict =xmod_layer.adapter_layer_norm.weight
SCREAMING_SNAKE_CASE_ : Union[str, Any] =xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
SCREAMING_SNAKE_CASE_ : Union[str, Any] =bert_output.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE_ : Tuple =xmod_layer.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE_ : int =from_adapter.fca.weight
SCREAMING_SNAKE_CASE_ : Union[str, Any] =from_adapter.fca.bias
SCREAMING_SNAKE_CASE_ : Optional[Any] =from_adapter.fca.weight
SCREAMING_SNAKE_CASE_ : int =from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
SCREAMING_SNAKE_CASE_ : str =xmod_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE_ : Optional[int] =xmod_sent_encoder.layer_norm.bias
if classification_head:
SCREAMING_SNAKE_CASE_ : List[Any] =xmod.model.classification_heads['mnli'].dense.weight
SCREAMING_SNAKE_CASE_ : Optional[Any] =xmod.model.classification_heads['mnli'].dense.bias
SCREAMING_SNAKE_CASE_ : Dict =xmod.model.classification_heads['mnli'].out_proj.weight
SCREAMING_SNAKE_CASE_ : Optional[int] =xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE_ : Optional[int] =xmod.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE_ : Tuple =xmod.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE_ : Dict =xmod.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE_ : Tuple =xmod.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE_ : List[str] =xmod.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE_ : Tuple =xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE_ : Optional[Any] =xmod.encode(lowerCAmelCase_ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] =model(lowerCAmelCase_ )[0]
if classification_head:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =xmod.model.classification_heads['mnli'](xmod.extract_features(lowerCAmelCase_ ) )
else:
SCREAMING_SNAKE_CASE_ : List[str] =xmod.model(lowerCAmelCase_ ,lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape ,their_output.shape )
SCREAMING_SNAKE_CASE_ : str =torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
SCREAMING_SNAKE_CASE_ : str =torch.allclose(lowerCAmelCase_ ,lowerCAmelCase_ ,atol=1e-3 )
print('Do both models output the same tensors?' ,'🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(lowerCAmelCase_ ).mkdir(parents=lowerCAmelCase_ ,exist_ok=lowerCAmelCase_ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
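# --- Illustrative usage (hedged sketch, separate from the conversion script) ---
# Loading the dump written above and selecting the active language adapter. It assumes
# the transformers X-MOD API exposes `set_default_language` on the loaded model (the
# script above calls it via `model.roberta`) and that X-MOD reuses the XLM-R vocabulary;
# both are assumptions of this sketch, not guarantees of the script.
import torch
from transformers import AutoTokenizer, XmodModel

def encode_with_converted_xmod(pytorch_dump_folder_path, text="Hello, World!", language="en_XX"):
    tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")  # assumed tokenizer
    model = XmodModel.from_pretrained(pytorch_dump_folder_path)
    model.set_default_language(language)
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        return model(**inputs).last_hidden_state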
| 220
| 1
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
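# Worked example (sanity check added for illustration): for n = 10 the sum 1 + ... + 10
# is 55, so the square of the sum is 55**2 = 3025; the sum of squares 1**2 + ... + 10**2
# is 385; the difference is 3025 - 385 = 2640.
assert solution(10) == 2640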
| 701
|
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class MockResponse:
'''simple docstring'''
a_ : int =200
a_ : List[str] ={"""Content-Length""": """100"""}
a_ : Tuple ={}
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Any ):
'''simple docstring'''
return [bytes(UpperCamelCase , 'utf-8' )]
def lowerCamelCase_ ( *lowerCAmelCase: Tuple , **lowerCAmelCase: Tuple )-> str:
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict )-> Optional[Any]:
import requests
monkeypatch.setattr(lowerCAmelCase , 'request' , lowerCAmelCase )
_snake_case : List[str] = URL
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[int] = url
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = [url]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': url}
_snake_case : int = 'dummy'
_snake_case : Optional[Any] = 'downloads'
_snake_case : Union[str, Any] = tmp_path
_snake_case : Dict = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase , lowerCAmelCase ) , use_etag=lowerCAmelCase , )
_snake_case : str = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Optional[int] = dl_manager.download(lowerCAmelCase )
_snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = [downloaded_paths]
_snake_case : List[str] = [urls]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
_snake_case : Any = downloaded_paths.values()
_snake_case : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase , lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case : str = Path(lowerCAmelCase )
_snake_case : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case : List[str] = downloaded_path.read_text()
assert content == CONTENT
_snake_case : Any = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_snake_case : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Any )-> str:
_snake_case : str = str(lowerCAmelCase )
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : str = filename
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[Any] = [filename]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': filename}
_snake_case : Any = 'dummy'
_snake_case : Union[str, Any] = xz_file.parent
_snake_case : int = 'extracted'
_snake_case : Union[str, Any] = DownloadConfig(
cache_dir=lowerCAmelCase , use_etag=lowerCAmelCase , )
_snake_case : List[str] = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Dict = dl_manager.extract(lowerCAmelCase )
_snake_case : Optional[int] = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[str] = [extracted_paths]
_snake_case : int = [paths]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in extracted_paths.keys()
_snake_case : Optional[int] = extracted_paths.values()
_snake_case : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase , lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case : List[str] = Path(lowerCAmelCase )
_snake_case : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase , etag=lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case : Optional[int] = extracted_path.read_text()
_snake_case : int = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> Dict:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(lowerCAmelCase , start=1 ):
_snake_case : Dict = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> Dict:
_snake_case : List[str] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: int )-> str:
_snake_case : List[Any] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[int] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase_ ( lowerCAmelCase: Any )-> int:
_snake_case : Tuple = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase ) , start=1 ):
assert os.path.basename(lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
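# --- Illustrative usage (hedged sketch, outside pytest) ---
# The DownloadManager API exercised by the tests above: download a file (or archive)
# and get back a locally cached path. The URL here is only a placeholder.
from datasets.download.download_manager import DownloadManager

def fetch(url="https://example.com/data.tar.gz"):
    dl_manager = DownloadManager(dataset_name="demo")
    # download() only caches the file; download_and_extract() additionally unpacks archives
    return dl_manager.download_and_extract(url)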
| 669
| 0
|
def hexagonal_numbers(length: int) -> list[int]:
    '''simple docstring'''
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
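# Property check (illustrative): the n-th hexagonal number n * (2n - 1) equals the
# (2n - 1)-th triangular number (2n - 1) * 2n / 2, so hexagonal numbers are every other
# triangular number. The first five values produced above are:
assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]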
| 461
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase_ : Tuple = logging.get_logger(__name__)
class __lowercase ( BaseImageProcessor ):
_A = ["pixel_values"]
def __init__(self : List[str] , snake_case : bool = True , snake_case : Optional[Dict[str, int]] = None , snake_case : PILImageResampling = PILImageResampling.BILINEAR , snake_case : bool = True , snake_case : Dict[str, int] = None , snake_case : bool = True , snake_case : Union[int, float] = 1 / 255 , snake_case : bool = True , snake_case : Optional[Union[float, List[float]]] = None , snake_case : Optional[Union[float, List[float]]] = None , **snake_case : List[Any] , ) -> None:
super().__init__(**snake_case )
_lowercase : List[str] = size if size is not None else {"shortest_edge": 256}
_lowercase : Union[str, Any] = get_size_dict(snake_case , default_to_square=snake_case )
_lowercase : List[str] = crop_size if crop_size is not None else {"height": 224, "width": 224}
_lowercase : Dict = get_size_dict(snake_case , param_name="crop_size" )
_lowercase : List[Any] = do_resize
_lowercase : Optional[Any] = size
_lowercase : Tuple = resample
_lowercase : Tuple = do_center_crop
_lowercase : Any = crop_size
_lowercase : str = do_rescale
_lowercase : int = rescale_factor
_lowercase : List[Any] = do_normalize
_lowercase : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _a(self : Tuple , snake_case : np.ndarray , snake_case : Dict[str, int] , snake_case : PILImageResampling = PILImageResampling.BICUBIC , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : int , ) -> np.ndarray:
_lowercase : Union[str, Any] = get_size_dict(snake_case , default_to_square=snake_case )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_lowercase : int = get_resize_output_image_size(snake_case , size=size["shortest_edge"] , default_to_square=snake_case )
return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case )
def _a(self : str , snake_case : np.ndarray , snake_case : Dict[str, int] , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : Union[str, Any] , ) -> np.ndarray:
_lowercase : Union[str, Any] = get_size_dict(snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(snake_case , size=(size["height"], size["width"]) , data_format=snake_case , **snake_case )
def _a(self : Union[str, Any] , snake_case : np.ndarray , snake_case : float , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : List[Any] ) -> np.ndarray:
return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case )
def _a(self : int , snake_case : np.ndarray , snake_case : Union[float, List[float]] , snake_case : Union[float, List[float]] , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : Tuple , ) -> np.ndarray:
return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case )
def _a(self : Optional[int] , snake_case : ImageInput , snake_case : Optional[bool] = None , snake_case : Dict[str, int] = None , snake_case : PILImageResampling = None , snake_case : bool = None , snake_case : Dict[str, int] = None , snake_case : Optional[bool] = None , snake_case : Optional[float] = None , snake_case : Optional[bool] = None , snake_case : Optional[Union[float, List[float]]] = None , snake_case : Optional[Union[float, List[float]]] = None , snake_case : Optional[Union[str, TensorType]] = None , snake_case : Union[str, ChannelDimension] = ChannelDimension.FIRST , **snake_case : Tuple , ) -> Union[str, Any]:
_lowercase : Any = do_resize if do_resize is not None else self.do_resize
_lowercase : List[str] = size if size is not None else self.size
_lowercase : Optional[int] = get_size_dict(snake_case , default_to_square=snake_case )
_lowercase : Tuple = resample if resample is not None else self.resample
_lowercase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowercase : int = crop_size if crop_size is not None else self.crop_size
_lowercase : Dict = get_size_dict(snake_case , param_name="crop_size" )
_lowercase : Dict = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Dict = image_mean if image_mean is not None else self.image_mean
_lowercase : Optional[Any] = image_std if image_std is not None else self.image_std
_lowercase : int = make_list_of_images(snake_case )
if not valid_images(snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_lowercase : Tuple = [to_numpy_array(snake_case ) for image in images]
if do_resize:
_lowercase : List[str] = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images]
if do_center_crop:
_lowercase : List[str] = [self.center_crop(image=snake_case , size=snake_case ) for image in images]
if do_rescale:
_lowercase : Any = [self.rescale(image=snake_case , scale=snake_case ) for image in images]
if do_normalize:
_lowercase : Optional[Any] = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images]
_lowercase : Any = [to_channel_dimension_format(snake_case , snake_case ) for image in images]
_lowercase : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=snake_case , tensor_type=snake_case )
def _a(self : Dict , snake_case : List[str] , snake_case : List[Tuple] = None ) -> Optional[Any]:
_lowercase : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(snake_case ) != len(snake_case ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(snake_case ):
_lowercase : Dict = target_sizes.numpy()
_lowercase : Tuple = []
for idx in range(len(snake_case ) ):
_lowercase : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=snake_case )
_lowercase : int = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(snake_case )
else:
_lowercase : Optional[int] = logits.argmax(dim=1 )
_lowercase : int = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
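# --- Illustrative sketch of the post-processing step above ---
# The semantic-segmentation post-processing implemented in the last method boils down to:
# upsample the per-image logits to the requested size and take the argmax over the class
# dimension. A standalone, hedged version (shapes are illustrative):
import torch

def logits_to_segmentation(logits, target_size):
    # logits: (num_labels, height, width) for a single image; target_size: (height, width)
    resized = torch.nn.functional.interpolate(
        logits.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False
    )
    return resized[0].argmax(dim=0)  # (target_height, target_width) map of class ids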
| 461
| 1
|
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """simple docstring"""
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
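# Illustrative expectations: among denominators below 10, 1/7 has the longest recurring
# decimal cycle (142857, length 6), so the search returns 7; for the classic d < 1000
# case the well-known answer is 983, which solution() above should reproduce.
assert solution(1, 10) == 7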
| 590
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
UpperCamelCase :Union[str, Any] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
UpperCamelCase :List[str] = []
for i in range(len(self.block_out_channels ) - 1 ):
UpperCamelCase :Optional[Any] = self.block_out_channels[i]
UpperCamelCase :List[Any] = self.block_out_channels[i + 1]
UpperCamelCase :List[Any] = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__lowerCamelCase )
UpperCamelCase :List[str] = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__lowerCamelCase )
UpperCamelCase :Tuple = blocks
UpperCamelCase :Optional[Any] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Dict , __lowerCamelCase : Dict ):
UpperCamelCase :Tuple = self.conv_in(__lowerCamelCase )
UpperCamelCase :Optional[Any] = nn.silu(__lowerCamelCase )
for block in self.blocks:
UpperCamelCase :Tuple = block(__lowerCamelCase )
UpperCamelCase :List[str] = nn.silu(__lowerCamelCase )
UpperCamelCase :Dict = self.conv_out(__lowerCamelCase )
return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights( self , __lowerCamelCase : jax.random.KeyArray ) -> FrozenDict:
# init input tensors
UpperCamelCase :int = (1, self.in_channels, self.sample_size, self.sample_size)
UpperCamelCase :Union[str, Any] = jnp.zeros(__lowerCamelCase , dtype=jnp.floataa )
UpperCamelCase :int = jnp.ones((1,) , dtype=jnp.intaa )
UpperCamelCase :Tuple = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
UpperCamelCase :Tuple = (1, 3, self.sample_size * 8, self.sample_size * 8)
UpperCamelCase :Tuple = jnp.zeros(__lowerCamelCase , dtype=jnp.floataa )
UpperCamelCase , UpperCamelCase :int = jax.random.split(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["params"]
    def setup( self ):
UpperCamelCase :Dict = self.block_out_channels
UpperCamelCase :Tuple = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
UpperCamelCase :List[Any] = self.num_attention_heads or self.attention_head_dim
# input
UpperCamelCase :Optional[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
UpperCamelCase :Tuple = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
UpperCamelCase :Tuple = FlaxTimestepEmbedding(__lowerCamelCase , dtype=self.dtype )
UpperCamelCase :List[Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
UpperCamelCase :Union[str, Any] = self.only_cross_attention
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :List[Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :str = (num_attention_heads,) * len(self.down_block_types )
# down
UpperCamelCase :int = []
UpperCamelCase :str = []
UpperCamelCase :str = block_out_channels[0]
UpperCamelCase :Optional[Any] = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCamelCase )
for i, down_block_type in enumerate(self.down_block_types ):
UpperCamelCase :List[str] = output_channel
UpperCamelCase :Optional[Any] = block_out_channels[i]
UpperCamelCase :Tuple = i == len(__lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
UpperCamelCase :List[Any] = FlaxCrossAttnDownBlockaD(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
UpperCamelCase :List[Any] = FlaxDownBlockaD(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__lowerCamelCase )
for _ in range(self.layers_per_block ):
UpperCamelCase :List[Any] = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCamelCase )
if not is_final_block:
UpperCamelCase :str = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCamelCase )
UpperCamelCase :Optional[Any] = down_blocks
UpperCamelCase :Optional[Any] = controlnet_down_blocks
# mid
UpperCamelCase :str = block_out_channels[-1]
UpperCamelCase :Dict = FlaxUNetMidBlockaDCrossAttn(
in_channels=__lowerCamelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
UpperCamelCase :List[str] = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : float = 1.0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = False , ):
UpperCamelCase :Dict = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
UpperCamelCase :List[Any] = jnp.flip(__lowerCamelCase , axis=1 )
# 1. time
if not isinstance(__lowerCamelCase , jnp.ndarray ):
UpperCamelCase :Any = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
UpperCamelCase :Any = timesteps.astype(dtype=jnp.floataa )
UpperCamelCase :Optional[Any] = jnp.expand_dims(__lowerCamelCase , 0 )
UpperCamelCase :Optional[Any] = self.time_proj(__lowerCamelCase )
UpperCamelCase :Any = self.time_embedding(__lowerCamelCase )
# 2. pre-process
UpperCamelCase :int = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
UpperCamelCase :Dict = self.conv_in(__lowerCamelCase )
UpperCamelCase :Any = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
UpperCamelCase :Optional[int] = self.controlnet_cond_embedding(__lowerCamelCase )
sample += controlnet_cond
# 3. down
UpperCamelCase :int = (sample,)
for down_block in self.down_blocks:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase , UpperCamelCase :Optional[Any] = down_block(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , deterministic=not train )
else:
UpperCamelCase , UpperCamelCase :Union[str, Any] = down_block(__lowerCamelCase , __lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
UpperCamelCase :List[str] = self.mid_block(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , deterministic=not train )
        # 5. controlnet blocks
UpperCamelCase :str = ()
for down_block_res_sample, controlnet_block in zip(__lowerCamelCase , self.controlnet_down_blocks ):
UpperCamelCase :Any = controlnet_block(__lowerCamelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
UpperCamelCase :Optional[Any] = controlnet_down_block_res_samples
UpperCamelCase :str = self.controlnet_mid_block(__lowerCamelCase )
# 6. scaling
UpperCamelCase :str = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__lowerCamelCase , mid_block_res_sample=__lowerCamelCase )
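# --- Illustrative usage (hedged sketch) ---
# Instantiating a ControlNet of this kind and materializing its parameters with a random
# key. To stay runnable it uses the published diffusers implementation (imported under an
# alias so it does not shadow the class defined above); `init_weights(rng)` mirrors the
# method defined in this listing and is assumed to be the public call pattern.
import jax
from diffusers import FlaxControlNetModel as DiffusersFlaxControlNetModel

def init_controlnet_params(seed=0):
    controlnet = DiffusersFlaxControlNetModel()
    params = controlnet.init_weights(jax.random.PRNGKey(seed))
    return controlnet, params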
| 590
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
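# Illustrative usage (hedged): the defaults above describe a ViT-Base-sized MSN encoder;
# smaller variants simply override the relevant fields, e.g. a ViT-Small-style layout:
vit_msn_small_config = ViTMSNConfig(hidden_size=384, intermediate_size=1536, num_attention_heads=6)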
| 651
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__UpperCAmelCase = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 651
| 1
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase =logging.get_logger(__name__)
class _lowerCamelCase ( BaseImageProcessor ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
def __init__( self , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = 1 / 2_5_5 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = IMAGENET_DEFAULT_MEAN , __SCREAMING_SNAKE_CASE = IMAGENET_DEFAULT_STD , **__SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = size if size is not None else {'''shortest_edge''': 2_2_4}
UpperCamelCase__ : Tuple = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
UpperCamelCase__ : List[str] = get_size_dict(__SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
UpperCamelCase__ : Any = do_resize
UpperCamelCase__ : List[Any] = size
UpperCamelCase__ : List[str] = resample
UpperCamelCase__ : str = do_center_crop
UpperCamelCase__ : Dict = crop_size
UpperCamelCase__ : Dict = do_rescale
UpperCamelCase__ : Dict = rescale_factor
UpperCamelCase__ : Union[str, Any] = do_normalize
UpperCamelCase__ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCamelCase__ : Optional[int] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
UpperCamelCase__ : str = int((2_5_6 / 2_2_4) * size['''shortest_edge'''] )
UpperCamelCase__ : Any = get_resize_output_image_size(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
__SCREAMING_SNAKE_CASE , size=(size_dict['''height'''], size_dict['''width''']) , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
UpperCamelCase__ : List[str] = get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(__SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE , ) -> BatchFeature:
"""simple docstring"""
UpperCamelCase__ : int = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ : Any = resample if resample is not None else self.resample
UpperCamelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ : int = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ : Tuple = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ : Dict = image_std if image_std is not None else self.image_std
UpperCamelCase__ : List[str] = size if size is not None else self.size
UpperCamelCase__ : int = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ : Tuple = get_size_dict(__SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
UpperCamelCase__ : Any = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCamelCase__ : int = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
UpperCamelCase__ : Union[str, Any] = [self.resize(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
UpperCamelCase__ : Any = [self.center_crop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
UpperCamelCase__ : Union[str, Any] = [self.rescale(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
UpperCamelCase__ : int = [self.normalize(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
UpperCamelCase__ : str = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
UpperCamelCase__ : List[Any] = {'''pixel_values''': images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
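# Hedged usage sketch (not part of the original file): assuming this class follows the
# standard Hugging Face image-processor API, preprocessing a single image might look like
# the commented lines below. The class name and file path are illustrative placeholders.
#
#   from PIL import Image
#   processor = <ImageProcessorClass>(size={"height": 224, "width": 224})
#   batch = processor(Image.open("example.png"), return_tensors="pt")
#   pixel_values = batch["pixel_values"]  # tensor of shape (1, 3, height, width)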
| 462
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext ( path ):
    """Infers the pipeline data format from the file extension; an empty path means stdin/stdout ("pipe")."""
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
    raise Exception(
        f'''Unable to determine file format from file extension {path}. '''
        f'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' )
def run_command_factory ( args ):
    """Builds a RunCommand (pipeline + data reader) from the parsed CLI arguments."""
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )
class RunCommand ( BaseTransformersCLICommand ):
    """CLI command that runs a pipeline over a file (or stdin) and writes the predictions out."""
    def __init__( self , nlp: Pipeline , reader: PipelineDataFormat ) -> None:
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand( parser ) -> None:
        """Registers the `run` subcommand and its arguments on the given argument parser."""
        run_parser = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''' )
        run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''' )
        run_parser.add_argument('''--input''' , type=str , help='''Path to the file to use for inference''' )
        run_parser.add_argument('''--output''' , type=str , help='''Path to the file that will be used post to write results.''' )
        run_parser.add_argument('''--model''' , type=str , help='''Name or path to the model to instantiate.''' )
        run_parser.add_argument('''--config''' , type=str , help='''Name or path to the model\'s config to instantiate.''' )
        run_parser.add_argument(
            '''--tokenizer''' , type=str , help='''Name of the tokenizer to use. (default: same as the model name)''' )
        run_parser.add_argument(
            '''--column''' , type=str , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
        run_parser.add_argument(
            '''--format''' , type=str , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
        run_parser.add_argument(
            '''--device''' , type=int , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
        run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''' )
        run_parser.set_defaults(func=run_command_factory )
    def run( self ) -> None:
        """Runs the pipeline over every entry provided by the reader and saves the outputs."""
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(F'''Current pipeline requires output to be in binary format, saving at {binary_path}''' )
        else:
            self._reader.save(outputs )
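# Hedged usage sketch (illustrative, not from the original file): once this command is
# registered on the `transformers-cli` entry point, an invocation could look like
#
#   transformers-cli run --task text-classification --input data.csv \
#       --column text --format csv --output predictions.csv
#
# Paths, task name and format are placeholders; the format can also be left to `infer`.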
| 462
| 1
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : List[str] =logging.get_logger(__name__)
a__ : List[Any] ={
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] ="unispeech"
def __init__( self : Tuple , __A : int=3_2 , __A : Optional[Any]=7_6_8 , __A : int=1_2 , __A : Tuple=1_2 , __A : List[str]=3_0_7_2 , __A : int="gelu" , __A : Dict=0.1 , __A : Any=0.1 , __A : Any=0.1 , __A : Tuple=0.0 , __A : Optional[int]=0.0 , __A : Tuple=0.1 , __A : Union[str, Any]=0.1 , __A : Any=0.02 , __A : Any=1e-5 , __A : str="group" , __A : Optional[int]="gelu" , __A : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __A : str=(5, 2, 2, 2, 2, 2, 2) , __A : Any=(1_0, 3, 3, 3, 3, 2, 2) , __A : Any=False , __A : List[Any]=1_2_8 , __A : Dict=1_6 , __A : Tuple=False , __A : List[str]=True , __A : Union[str, Any]=0.05 , __A : Union[str, Any]=1_0 , __A : Union[str, Any]=2 , __A : List[Any]=0.0 , __A : List[Any]=1_0 , __A : int=0 , __A : Dict=3_2_0 , __A : List[str]=2 , __A : Any=0.1 , __A : Tuple=1_0_0 , __A : int=2_5_6 , __A : Any=2_5_6 , __A : int=0.1 , __A : Any="mean" , __A : Tuple=False , __A : Optional[int]=False , __A : Any=2_5_6 , __A : str=8_0 , __A : List[Any]=0 , __A : List[Any]=1 , __A : Dict=2 , __A : Dict=0.5 , **__A : Optional[int] , ):
super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A )
__UpperCamelCase = hidden_size
__UpperCamelCase = feat_extract_norm
__UpperCamelCase = feat_extract_activation
__UpperCamelCase = list(__A )
__UpperCamelCase = list(__A )
__UpperCamelCase = list(__A )
__UpperCamelCase = conv_bias
__UpperCamelCase = num_conv_pos_embeddings
__UpperCamelCase = num_conv_pos_embedding_groups
__UpperCamelCase = len(self.conv_dim )
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = activation_dropout
__UpperCamelCase = feat_proj_dropout
__UpperCamelCase = final_dropout
__UpperCamelCase = layerdrop
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = initializer_range
__UpperCamelCase = num_ctc_classes
__UpperCamelCase = vocab_size
__UpperCamelCase = do_stable_layer_norm
__UpperCamelCase = use_weighted_layer_sum
__UpperCamelCase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCamelCase = apply_spec_augment
__UpperCamelCase = mask_time_prob
__UpperCamelCase = mask_time_length
__UpperCamelCase = mask_time_min_masks
__UpperCamelCase = mask_feature_prob
__UpperCamelCase = mask_feature_length
__UpperCamelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__UpperCamelCase = num_codevectors_per_group
__UpperCamelCase = num_codevector_groups
__UpperCamelCase = contrastive_logits_temperature
__UpperCamelCase = feat_quantizer_dropout
__UpperCamelCase = num_negatives
__UpperCamelCase = codevector_dim
__UpperCamelCase = proj_codevector_dim
__UpperCamelCase = diversity_loss_weight
# ctc loss
__UpperCamelCase = ctc_loss_reduction
__UpperCamelCase = ctc_zero_infinity
# pretraining loss
__UpperCamelCase = replace_prob
@property
def _lowerCamelCase ( self : str ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
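# Hedged note (added for clarity): the property above multiplies the entries of
# `conv_stride`, i.e. the total downsampling factor of the convolutional feature
# extractor. With the default strides (5, 2, 2, 2, 2, 2, 2) it evaluates to
# 5 * 2**6 = 320 input samples per output frame.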
| 399
|
'''simple docstring'''
def binary_exponentiation( a : int , n : int , mod : int ) -> int:
    """Computes (a ** n) % mod in O(log n) multiplications by repeated squaring."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a , n - 1 , mod ) * a) % mod
    else:
        b = binary_exponentiation(a , n // 2 , mod )
        return (b * b) % mod
# a prime number
p = 701
a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
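# Hedged note on the two checks above: by Fermat's little theorem, for a prime p and a
# base b not divisible by p, b**(p - 1) is congruent to 1 (mod p), so b**(p - 2) is the
# modular inverse of b. Because b divides a exactly here, (a / b) % p therefore equals
# (a * b**(p - 2)) % p, and both print statements are expected to output True.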
| 399
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_perceiver"""] = ["""PerceiverFeatureExtractor"""]
    _import_structure["""image_processing_perceiver"""] = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_perceiver"""] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 715
|
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817E-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3E8  # unit of c : m * s^-1
def casimir_force( force: float ,area: float ,distance: float ) -> dict[str, float]:
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            2_4_0 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (2_4_0 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force)
        ) ** (1 / 4)
        return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
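# Hedged note (added for clarity): the three branches above are rearrangements of the
# Casimir force formula F = (hbar * c * pi**2 * A) / (240 * d**4), solved for whichever
# of force, area or distance was passed in as 0.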
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 425
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=13 ,_lowerCamelCase=7 ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=99 ,_lowerCamelCase=[1, 1, 2] ,_lowerCamelCase=1 ,_lowerCamelCase=32 ,_lowerCamelCase=4 ,_lowerCamelCase=8 ,_lowerCamelCase=37 ,_lowerCamelCase="gelu_new" ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.0 ,_lowerCamelCase=512 ,_lowerCamelCase=3 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=3 ,_lowerCamelCase=4 ,_lowerCamelCase=None ,_lowerCamelCase=False ,) -> int:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = block_sizes
__lowercase = num_decoder_layers
__lowercase = d_model
__lowercase = n_head
__lowercase = d_head
__lowercase = d_inner
__lowercase = hidden_act
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = 2
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
__lowercase = initializer_std
# Used in the tests to check the size of the first attention layer
__lowercase = n_head
# Used in the tests to check the size of the first hidden state
__lowercase = self.d_model
# Used in the tests to check the number of output hidden states/attentions
__lowercase = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
__lowercase = self.num_hidden_layers + 2
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = FunnelConfig(
vocab_size=self.vocab_size ,block_sizes=self.block_sizes ,num_decoder_layers=self.num_decoder_layers ,d_model=self.d_model ,n_head=self.n_head ,d_head=self.d_head ,d_inner=self.d_inner ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,activation_dropout=self.activation_dropout ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_std=self.initializer_std ,)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> int:
'''simple docstring'''
__lowercase = TFFunnelModel(config=_lowerCamelCase )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(_lowerCamelCase )
__lowercase = [input_ids, input_mask]
__lowercase = model(_lowerCamelCase )
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
__lowercase = False
__lowercase = TFFunnelModel(config=_lowerCamelCase )
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
__lowercase = False
__lowercase = TFFunnelModel(config=_lowerCamelCase )
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> Dict:
'''simple docstring'''
__lowercase = TFFunnelBaseModel(config=_lowerCamelCase )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(_lowerCamelCase )
__lowercase = [input_ids, input_mask]
__lowercase = model(_lowerCamelCase )
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
__lowercase = False
__lowercase = TFFunnelBaseModel(config=_lowerCamelCase )
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 3, self.d_model) )
__lowercase = False
__lowercase = TFFunnelBaseModel(config=_lowerCamelCase )
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> Optional[Any]:
'''simple docstring'''
__lowercase = TFFunnelForPreTraining(config=_lowerCamelCase )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> Dict:
'''simple docstring'''
__lowercase = TFFunnelForMaskedLM(config=_lowerCamelCase )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> Tuple:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = TFFunnelForSequenceClassification(config=_lowerCamelCase )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> Tuple:
'''simple docstring'''
__lowercase = self.num_choices
__lowercase = TFFunnelForMultipleChoice(config=_lowerCamelCase )
__lowercase = tf.tile(tf.expand_dims(_lowerCamelCase ,1 ) ,(1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(_lowerCamelCase ,1 ) ,(1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(_lowerCamelCase ,1 ) ,(1, self.num_choices, 1) )
__lowercase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> str:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = TFFunnelForTokenClassification(config=_lowerCamelCase )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,) -> Dict:
'''simple docstring'''
__lowercase = TFFunnelForQuestionAnswering(config=_lowerCamelCase )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __lowercase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : Optional[int] = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
a : Optional[Any] = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
a : int = False
a : Union[str, Any] = False
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = TFFunnelModelTester(self )
__lowercase = ConfigTester(self ,config_class=_lowerCamelCase )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
@require_tf
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : str = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
a : Tuple = False
a : Union[str, Any] = False
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = TFFunnelModelTester(self ,base=_lowerCamelCase )
__lowercase = ConfigTester(self ,config_class=_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase )
| 502
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor ( ProcessorMixin ):
    """Wraps a ChineseCLIP image processor and a BERT tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self ,image_processor=None ,tokenizer=None ,**kwargs ) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' ,FutureWarning ,)
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor ,tokenizer )
        self.current_processor = self.image_processor
    def __call__(self ,text=None ,images=None ,return_tensors=None ,**kwargs ) -> BatchEncoding:
        """Tokenizes `text` and/or preprocesses `images`, merging both into one encoding when given together."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text ,return_tensors=return_tensors ,**kwargs )
        if images is not None:
            image_features = self.image_processor(images ,return_tensors=return_tensors ,**kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) ,tensor_type=return_tensors )
    def batch_decode(self ,*args ,**kwargs ):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args ,**kwargs )
    def decode(self ,*args ,**kwargs ):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args ,**kwargs )
    @property
    def model_input_names(self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class(self ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,FutureWarning ,)
        return self.image_processor_class
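# Hedged usage sketch (illustrative, not from the original file):
#
#   from PIL import Image
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只猫"], images=Image.open("cat.png"), return_tensors="pt")
#
# The checkpoint id and file name are placeholders; any compatible ChineseCLIP checkpoint should work.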
| 502
| 1
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments ( TrainingArguments ):
    """Training arguments for sequence-to-sequence models, adding generation-related options."""
    sortish_sampler: bool = field(default=False , metadata={"help": "Whether to use SortishSampler or not."} )
    predict_with_generate: bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    generation_max_length: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        } , )
    def to_dict( self ) -> dict:
        """Serializes the arguments, converting any nested GenerationConfig into a plain dict."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d
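# Hedged usage sketch (illustrative, not from the original file):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out",              # inherited from TrainingArguments
#       predict_with_generate=True,
#       generation_max_length=128,
#   )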
| 702
|
"""simple docstring"""
def is_ip_va_address_valid( ip_va_address: str ) -> bool:
    """Returns True if the string is a dotted-quad IPv4 address whose octets are all in range."""
    octets = [int(i ) for i in ip_va_address.split('.' ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 254 for octet in octets )
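# Hedged examples (illustrative values, per the 0..254 octet range checked above):
#   is_ip_va_address_valid("192.168.0.23")   -> True
#   is_ip_va_address_valid("192.256.15.8")   -> False  (octet out of range)
#   is_ip_va_address_valid("not.an.ip")      -> False  (non-numeric octets are dropped)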
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = '''valid''' if is_ip_va_address_valid(ip) else '''invalid'''
print(f'{ip} is a {valid_or_invalid} IP v4 address.')
| 491
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_nezha'''] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 431
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_table_transformer"""] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 413
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
_A = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool ( PipelineTool ):
    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']
    def encode( self, text, src_lang, tgt_lang ):
        """Maps the plain-English language names to NLLB codes and tokenizes the text."""
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language." )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language." )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors='''pt''', src_lang=src_lang, tgt_lang=tgt_lang )
    def forward( self, inputs ):
        """Runs generation with the underlying seq2seq model."""
        return self.model.generate(**inputs )
    def decode( self, outputs ):
        """Decodes the generated token ids back into text."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True )
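# Hedged usage sketch (illustrative, not from the original file): through the generic
# Tool call interface this would roughly amount to
#
#   tool = TranslationTool()
#   tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")
#
# which would use the checkpoint named in `default_checkpoint` above.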
| 325
|
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline ( DiffusionPipeline ):
    """Unconditional latent-diffusion pipeline: denoises VQ-VAE latents with a UNet and decodes them to images."""
    def __init__( self, vqvae, unet, scheduler ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler )
    @torch.no_grad()
    def __call__( self, batch_size = 1, generator = None, eta = 0.0, num_inference_steps = 50, output_type = "pil", return_dict = True, **kwargs, ):
        # start from pure Gaussian noise in latent space
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=generator, )
        latents = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            latent_model_input = self.scheduler.scale_model_input(latents, t )
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs ).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0, 1 )
        image = image.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
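# Hedged usage sketch (illustrative, not from the original file):
#
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")  # checkpoint id is a placeholder
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]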
| 325
| 1
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : List[str] = LongformerTokenizer
_UpperCamelCase : Any = True
_UpperCamelCase : Optional[Any] = LongformerTokenizerFast
_UpperCamelCase : Union[str, Any] = True
def SCREAMING_SNAKE_CASE__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
lowercase = dict(zip(snake_case , range(len(snake_case ) ) ) )
lowercase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowercase = {'unk_token': '<unk>'}
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case ) )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = 'lower newer'
lowercase = 'lower newer'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase = 'lower newer'
lowercase = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowercase = tokenizer.tokenize(snake_case ) # , add_prefix_space=True)
self.assertListEqual(snake_case , snake_case )
lowercase = tokens + [tokenizer.unk_token]
lowercase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=snake_case ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=snake_case ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
lowercase = tokenizer.encode('sequence builders' , add_special_tokens=snake_case )
lowercase = tokenizer.encode('multi-sequence build' , add_special_tokens=snake_case )
lowercase = tokenizer.encode(
'sequence builders' , add_special_tokens=snake_case , add_prefix_space=snake_case )
lowercase = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=snake_case , add_prefix_space=snake_case )
lowercase = tokenizer.build_inputs_with_special_tokens(snake_case )
lowercase = tokenizer.build_inputs_with_special_tokens(snake_case , snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_tokenizer()
lowercase = 'Encode this sequence.'
lowercase = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
lowercase = tokenizer.encode(snake_case , add_special_tokens=snake_case , add_prefix_space=snake_case )
lowercase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(snake_case , snake_case )
lowercase = tokenizer.encode(snake_case , add_special_tokens=snake_case , add_prefix_space=snake_case )
lowercase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(snake_case , snake_case )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
lowercase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
lowercase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(snake_case , snake_case )
# Testing spaces after special tokens
lowercase = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case )} ) # mask token has a left space
lowercase = tokenizer.convert_tokens_to_ids(snake_case )
lowercase = 'Encode <mask> sequence'
lowercase = 'Encode <mask>sequence'
lowercase = tokenizer.encode(snake_case )
lowercase = encoded.index(snake_case )
lowercase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(snake_case , snake_case )
lowercase = tokenizer.encode(snake_case )
lowercase = encoded.index(snake_case )
lowercase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
lowercase = self.tokenizer_class.from_pretrained(snake_case , **snake_case )
lowercase = 'A, <mask> AllenNLP sentence.'
lowercase = tokenizer_r.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
lowercase = tokenizer_p.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
lowercase = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
lowercase = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def SCREAMING_SNAKE_CASE__ ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case )
lowercase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , snake_case )
self.assertEqual(post_processor_state['add_prefix_space'] , snake_case )
self.assertEqual(post_processor_state['trim_offsets'] , snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
lowercase = F'''{text_of_1_token} {text_of_1_token}'''
lowercase = self.rust_tokenizer_class.from_pretrained(
snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case )
lowercase = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(snake_case ) + 1, len(snake_case ) + 1 + len(snake_case )) , )
lowercase = self.rust_tokenizer_class.from_pretrained(
snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case )
lowercase = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(snake_case ) + 1, len(snake_case ) + 1 + len(snake_case )) , )
lowercase = self.rust_tokenizer_class.from_pretrained(
snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case )
lowercase = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(snake_case ), len(snake_case ) + 1 + len(snake_case )) , )
lowercase = self.rust_tokenizer_class.from_pretrained(
snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case )
lowercase = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(snake_case ), len(snake_case ) + 1 + len(snake_case )) , )
lowercase = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase = self.rust_tokenizer_class.from_pretrained(
snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case )
lowercase = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(snake_case ) + 1, 1 + len(snake_case ) + 1 + len(snake_case )) , )
lowercase = self.rust_tokenizer_class.from_pretrained(
snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case )
lowercase = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(snake_case ), 1 + len(snake_case ) + 1 + len(snake_case )) , )
lowercase = self.rust_tokenizer_class.from_pretrained(
snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case )
lowercase = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(snake_case ), 1 + len(snake_case ) + 1 + len(snake_case )) , )
| 84
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
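# Fast tests: build StableDiffusionInpaintPipeline from tiny dummy components (UNet, VAE, CLIP text encoder) and run it on CPU.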
class __a (UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :str = StableDiffusionInpaintPipeline
_SCREAMING_SNAKE_CASE :Any = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_SCREAMING_SNAKE_CASE :Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_SCREAMING_SNAKE_CASE :Optional[int] = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_SCREAMING_SNAKE_CASE :Dict = frozenset([])
def _a ( self ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_a , )
SCREAMING_SNAKE_CASE__ : List[str] = PNDMScheduler(skip_prk_steps=_a )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
SCREAMING_SNAKE_CASE__ : int = CLIPTextModel(_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE__ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a ( self , _a , _a=0 ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
SCREAMING_SNAKE_CASE__ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Any = Image.fromarray(np.uint8(_a ) ).convert("""RGB""" ).resize((64, 64) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Image.fromarray(np.uint8(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(_a ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(_a )
else:
SCREAMING_SNAKE_CASE__ : str = torch.Generator(device=_a ).manual_seed(_a )
SCREAMING_SNAKE_CASE__ : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInpaintPipeline(**_a )
SCREAMING_SNAKE_CASE__ : Any = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : int = self.get_dummy_inputs(_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe(**_a ).images
SCREAMING_SNAKE_CASE__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : str = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Optional[int]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
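# Slow integration tests: load the stabilityai/stable-diffusion-2-inpainting checkpoint and run it end to end on a GPU.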
@slow
@require_torch_gpu
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
SCREAMING_SNAKE_CASE__ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
SCREAMING_SNAKE_CASE__ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = """stabilityai/stable-diffusion-2-inpainting"""
SCREAMING_SNAKE_CASE__ : Any = StableDiffusionInpaintPipeline.from_pretrained(_a , safety_checker=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : int = """Face of a yellow cat, high resolution, sitting on a park bench"""
SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
SCREAMING_SNAKE_CASE__ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
SCREAMING_SNAKE_CASE__ : List[str] = """stabilityai/stable-diffusion-2-inpainting"""
SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_a , torch_dtype=torch.float16 , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Any = """Face of a yellow cat, high resolution, sitting on a park bench"""
SCREAMING_SNAKE_CASE__ : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _a ( self ) -> Tuple:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
SCREAMING_SNAKE_CASE__ : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
SCREAMING_SNAKE_CASE__ : List[str] = """stabilityai/stable-diffusion-2-inpainting"""
SCREAMING_SNAKE_CASE__ : Dict = PNDMScheduler.from_pretrained(_a , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(
_a , safety_checker=_a , scheduler=_a , torch_dtype=torch.float16 , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
SCREAMING_SNAKE_CASE__ : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
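# Tests for the Python (slow) and Rust (fast) CodeGen tokenizers, driven by a tiny BPE vocab/merges pair written to a temporary directory in setUp.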
@require_tokenizers
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[Any] =CodeGenTokenizer
a_ : Optional[int] =CodeGenTokenizerFast
a_ : Optional[int] =True
a_ : Union[str, Any] ={"""add_prefix_space""": True}
a_ : int =False
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
_snake_case : Any = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
_snake_case : Dict = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_snake_case : Optional[int] = {'unk_token': '<unk>'}
_snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase ) )
def UpperCamelCase_ ( self : Union[str, Any] , **UpperCamelCase : str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : int , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : Optional[Any] = 'lower newer'
_snake_case : int = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Any = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_snake_case : Any = 'lower newer'
_snake_case : str = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
_snake_case : List[Any] = tokenizer.tokenize(UpperCamelCase , add_prefix_space=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
_snake_case : Any = tokens + [tokenizer.unk_token]
_snake_case : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_snake_case : Any = self.get_tokenizer()
_snake_case : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=UpperCamelCase )
_snake_case : Optional[int] = 'lower newer'
# Testing tokenization
_snake_case : Union[str, Any] = tokenizer.tokenize(UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : Union[str, Any] = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# Testing conversion to ids without special tokens
_snake_case : Optional[Any] = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : str = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# Testing conversion to ids with special tokens
_snake_case : Tuple = self.get_rust_tokenizer(add_prefix_space=UpperCamelCase )
_snake_case : str = tokenizer.encode(UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : Union[str, Any] = rust_tokenizer.encode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# Testing the unknown token
_snake_case : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
_snake_case : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : Dict=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
# Simple input
_snake_case : Union[str, Any] = 'This is a simple input'
_snake_case : Dict = ['This is a simple input 1', 'This is a simple input 2']
_snake_case : Optional[Any] = ('This is a simple input', 'This is a pair')
_snake_case : int = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' )
# Simple input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' )
# Simple input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' , )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' )
# Pair input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' , )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
_snake_case : Optional[int] = 'This is a simple input'
_snake_case : Optional[Any] = ['This is a simple input looooooooong', 'This is a simple input']
_snake_case : int = ('This is a simple input', 'This is a pair')
_snake_case : List[str] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
_snake_case : Union[str, Any] = tokenizer.pad_token_id
_snake_case : Any = tokenizer(UpperCamelCase , padding='max_length' , max_length=30 , return_tensors='np' )
_snake_case : Dict = tokenizer(UpperCamelCase , padding=UpperCamelCase , truncate=UpperCamelCase , return_tensors='np' )
_snake_case : Optional[Any] = tokenizer(*UpperCamelCase , padding='max_length' , max_length=60 , return_tensors='np' )
_snake_case : Optional[Any] = tokenizer(UpperCamelCase , padding=UpperCamelCase , truncate=UpperCamelCase , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : str = '$$$'
_snake_case : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=UpperCamelCase , add_bos_token=UpperCamelCase )
_snake_case : Tuple = 'This is a simple input'
_snake_case : Tuple = ['This is a simple input 1', 'This is a simple input 2']
_snake_case : List[Any] = tokenizer.bos_token_id
_snake_case : int = tokenizer(UpperCamelCase )
_snake_case : Tuple = tokenizer(UpperCamelCase )
self.assertEqual(out_s.input_ids[0] , UpperCamelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_snake_case : Union[str, Any] = tokenizer.decode(out_s.input_ids )
_snake_case : Any = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , UpperCamelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Dict = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
_snake_case : Any = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
_snake_case : Optional[Any] = '\nif len_a > len_b: result = a\nelse: result = b'
_snake_case : Optional[int] = tokenizer.encode(UpperCamelCase )
_snake_case : Optional[Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
_snake_case : Optional[Any] = tokenizer.decode(UpperCamelCase , truncate_before_pattern=UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
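# Build the metadata mapping (class names and "thing" ids) that OneFormerImageProcessor expects, from a class-info JSON downloaded from the Hub.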
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Tuple="shi-labs/oneformer_demo" )-> Any:
with open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type='dataset' ) , 'r' ) as f:
_snake_case : str = json.load(lowerCAmelCase )
_snake_case : List[str] = {}
_snake_case : Optional[Any] = []
_snake_case : Optional[Any] = []
for key, info in class_info.items():
_snake_case : Optional[int] = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(lowerCAmelCase ) )
_snake_case : List[str] = thing_ids
_snake_case : Optional[Any] = class_names
return metadata
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any=7 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Dict=30 , UpperCamelCase : int=4_00 , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : str=True , UpperCamelCase : Any=[0.5, 0.5, 0.5] , UpperCamelCase : int=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=10 , UpperCamelCase : Dict=False , UpperCamelCase : Dict=2_55 , UpperCamelCase : Dict="shi-labs/oneformer_demo" , UpperCamelCase : Optional[int]="ade20k_panoptic.json" , UpperCamelCase : Tuple=10 , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : Tuple = num_channels
_snake_case : List[str] = min_resolution
_snake_case : List[str] = max_resolution
_snake_case : Optional[Any] = do_resize
_snake_case : Optional[Any] = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
_snake_case : Optional[int] = do_normalize
_snake_case : Any = image_mean
_snake_case : List[Any] = image_std
_snake_case : Any = class_info_file
_snake_case : List[str] = prepare_metadata(UpperCamelCase , UpperCamelCase )
_snake_case : Any = num_text
_snake_case : str = repo_path
# for the post_process_functions
_snake_case : Optional[Any] = 2
_snake_case : str = 10
_snake_case : Union[str, Any] = 10
_snake_case : List[Any] = 3
_snake_case : str = 4
_snake_case : List[Any] = num_labels
_snake_case : str = do_reduce_labels
_snake_case : List[str] = ignore_index
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
if not batched:
_snake_case : Any = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
_snake_case , _snake_case : Any = image.size
else:
_snake_case , _snake_case : Any = image.shape[1], image.shape[2]
if w < h:
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * h / w )
_snake_case : Any = self.size['shortest_edge']
elif w > h:
_snake_case : int = self.size['shortest_edge']
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
_snake_case : Dict = self.size['shortest_edge']
_snake_case : Dict = self.size['shortest_edge']
else:
_snake_case : List[Any] = []
for image in image_inputs:
_snake_case , _snake_case : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : List[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
_snake_case : Optional[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ : Any =image_processing_class
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCamelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCamelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCamelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCamelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_reduce_labels' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : int = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
_snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : Optional[int] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
_snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : List[str] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple=False , UpperCamelCase : str=False , UpperCamelCase : Dict="np" ):
'''simple docstring'''
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_snake_case : List[str] = self.image_processing_tester.num_labels
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
if with_segmentation_maps:
_snake_case : Optional[int] = num_labels
if is_instance_map:
_snake_case : Union[str, Any] = list(range(UpperCamelCase ) ) * 2
_snake_case : Tuple = dict(enumerate(UpperCamelCase ) )
_snake_case : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
_snake_case : int = [Image.fromarray(UpperCamelCase ) for annotation in annotations]
_snake_case : List[Any] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , UpperCamelCase , return_tensors='pt' , instance_id_to_semantic_id=UpperCamelCase , pad_and_return_pixel_mask=UpperCamelCase , )
return inputs
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
def common(UpperCamelCase : Any=False , UpperCamelCase : int=None ):
_snake_case : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase , is_instance_map=UpperCamelCase , segmentation_type=UpperCamelCase )
_snake_case : Union[str, Any] = inputs['mask_labels']
_snake_case : Optional[int] = inputs['class_labels']
_snake_case : Optional[int] = inputs['pixel_values']
_snake_case : Optional[Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = np.zeros((20, 50) )
_snake_case : int = 1
_snake_case : int = 1
_snake_case : Optional[Any] = 1
_snake_case : List[Any] = binary_mask_to_rle(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Any = image_processor.post_process_semantic_segmentation(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_snake_case : Optional[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
_snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(UpperCamelCase , target_sizes=UpperCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : int = image_processor.post_process_instance_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Any = image_processor.post_process_panoptic_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
import gc
import threading
import time
import psutil
import torch
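# Helpers for measuring wall-clock time, peak CPU RAM and per-GPU memory around a block of code.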
class PeakCPUMemory:
    """Tracks the peak resident-set size of the current process on a background thread."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    """Snapshot the current time, CPU RAM and per-GPU allocated memory before a measured block."""
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures


def end_measure(start_measures):
    """Return elapsed time plus CPU/GPU memory deltas (in MiB) relative to `start_measures`."""
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures


def log_measures(measures, description):
    """Pretty-print the measurements returned by `end_measure`."""
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
"""simple docstring"""
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
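# Tests for SqlDatasetReader / SqlDatasetWriter: read a dataset from a SQLite table and round-trip it back to SQL, with and without multiprocessing.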
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
lowercase__ : List[Any] = tmp_path / '''cache'''
lowercase__ : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : List[str] = SqlDatasetReader(
'''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_sql_dataset(__lowerCamelCase , __lowerCamelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Dict = tmp_path / '''cache'''
lowercase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowercase__ : int = features.copy() if features else default_expected_features
lowercase__ : Dict = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase__ : Dict = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_sql_dataset(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]:
with contextlib.closing(sqlite3.connect(__lowerCamelCase ) ) as con:
lowercase__ : Dict = con.cursor()
cur.execute('''SELECT * FROM dataset''' )
for row in cur:
yield row
@require_sqlalchemy
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
lowercase__ : Optional[Any] = tmp_path / '''cache'''
lowercase__ : Tuple = os.path.join(__lowerCamelCase , '''tmp.sql''' )
lowercase__ : Union[str, Any] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=__lowerCamelCase ).read()
SqlDatasetWriter(__lowerCamelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write()
lowercase__ : Any = iter_sql_file(__lowerCamelCase )
lowercase__ : Any = iter_sql_file(__lowerCamelCase )
for rowa, rowa in zip(__lowerCamelCase , __lowerCamelCase ):
assert rowa == rowa
@require_sqlalchemy
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
lowercase__ : Any = tmp_path / '''cache'''
lowercase__ : int = os.path.join(__lowerCamelCase , '''tmp.sql''' )
lowercase__ : Optional[int] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=__lowerCamelCase ).read()
SqlDatasetWriter(__lowerCamelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write()
lowercase__ : Optional[Any] = iter_sql_file(__lowerCamelCase )
lowercase__ : List[str] = iter_sql_file(__lowerCamelCase )
for rowa, rowa in zip(__lowerCamelCase , __lowerCamelCase ):
assert rowa == rowa
@require_sqlalchemy
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
lowercase__ : List[Any] = tmp_path / '''cache'''
lowercase__ : Optional[int] = os.path.join(__lowerCamelCase , '''tmp.sql''' )
lowercase__ : Dict = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=__lowerCamelCase ).read()
with pytest.raises(__lowerCamelCase ):
SqlDatasetWriter(__lowerCamelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
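# Recommended replacement, as stated in the warning above:
#   from diffusers import StableDiffusionImg2ImgPipeline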
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the values contained in the two input arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
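# Dataset readers for token classification: CoNLL-style NER files (last column is the label), CoNLL-2003 chunking files (second-to-last column), and CoNLL-U files for POS tagging.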
_a : List[Any] = logging.getLogger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__=-1 ):
# in NER datasets, the last column is usually reserved for NER label
_lowerCAmelCase : Optional[int] = label_idx
def __A ( self , a__ , a__ ):
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[str] = mode.value
_lowerCAmelCase : List[Any] = os.path.join(a__ , F"{mode}.txt" )
_lowerCAmelCase : List[Any] = 1
_lowerCAmelCase : Union[str, Any] = []
with open(a__ , encoding="""utf-8""" ) as f:
_lowerCAmelCase : str = []
_lowerCAmelCase : Any = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=a__ , labels=a__ ) )
guid_index += 1
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : Dict = []
else:
_lowerCAmelCase : str = line.split(""" """ )
words.append(splits[0] )
if len(a__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=a__ , labels=a__ ) )
return examples
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(a__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_lowerCAmelCase : List[Any] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(a__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def __A ( self , a__ ):
if path:
with open(a__ , """r""" ) as f:
_lowerCAmelCase : Any = f.read().splitlines()
if "O" not in labels:
_lowerCAmelCase : Tuple = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self ):
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def __A ( self , a__ ):
if path:
with open(a__ , """r""" ) as f:
_lowerCAmelCase : str = f.read().splitlines()
if "O" not in labels:
_lowerCAmelCase : Any = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __A ( SCREAMING_SNAKE_CASE_ ):
def __A ( self , a__ , a__ ):
if isinstance(a__ , a__ ):
_lowerCAmelCase : Any = mode.value
_lowerCAmelCase : Union[str, Any] = os.path.join(a__ , F"{mode}.txt" )
_lowerCAmelCase : Union[str, Any] = 1
_lowerCAmelCase : Optional[int] = []
with open(a__ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(a__ ):
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : str = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(a__ ) == len(a__ )
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=a__ , labels=a__ ) )
guid_index += 1
return examples
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : int = 0
for sentence in parse_incr(a__ ):
_lowerCAmelCase : int = preds_list[example_id]
_lowerCAmelCase : List[Any] = """"""
for token in sentence:
out += F"{token['form']} ({token['upos']}|{s_p.pop(0 )}) "
out += "\n"
writer.write(a__ )
example_id += 1
def __A ( self , a__ ):
if path:
with open(a__ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[int] = logging.get_logger(__name__)
_a : int = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
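# BigBird model configuration; the ONNX config class below declares the dynamic axes of the exported inputs.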
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = "big_bird"
def __init__( self , a__=50358 , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu_new" , a__=0.1 , a__=0.1 , a__=4096 , a__=2 , a__=0.0_2 , a__=1e-12 , a__=True , a__=0 , a__=1 , a__=2 , a__=66 , a__="block_sparse" , a__=True , a__=False , a__=64 , a__=3 , a__=None , **a__ , ):
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , sep_token_id=a__ , **a__ , )
_lowerCAmelCase : List[str] = vocab_size
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Optional[int] = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : int = attention_probs_dropout_prob
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[int] = type_vocab_size
_lowerCAmelCase : Optional[Any] = layer_norm_eps
_lowerCAmelCase : Optional[int] = use_cache
_lowerCAmelCase : List[Any] = rescale_embeddings
_lowerCAmelCase : Any = attention_type
_lowerCAmelCase : List[Any] = use_bias
_lowerCAmelCase : Dict = block_size
_lowerCAmelCase : Dict = num_random_blocks
_lowerCAmelCase : str = classifier_dropout
class __A ( SCREAMING_SNAKE_CASE_ ):
@property
def __A ( self ):
if self.task == "multiple-choice":
_lowerCAmelCase : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_lowerCAmelCase : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCAmelCase : Any =logging.get_logger(__name__)
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Optional[Any] , *lowerCAmelCase__ :int , **lowerCAmelCase__ :Union[str, Any] ) -> None:
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Any =logging.get_logger(__name__)
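# Checkpoint conversion: rename keys from the original MaskFormer (Swin backbone) implementation to the Hugging Face MaskFormerForInstanceSegmentation layout.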
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
__SCREAMING_SNAKE_CASE : str = MaskFormerConfig(backbone_config=lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
__SCREAMING_SNAKE_CASE : Optional[Any] = 847
__SCREAMING_SNAKE_CASE : Optional[int] = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
__SCREAMING_SNAKE_CASE : Dict = 150
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
__SCREAMING_SNAKE_CASE : Union[str, Any] = 171
__SCREAMING_SNAKE_CASE : Tuple = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
__SCREAMING_SNAKE_CASE : Dict = 133
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
__SCREAMING_SNAKE_CASE : Optional[int] = 19
__SCREAMING_SNAKE_CASE : Optional[Any] = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
__SCREAMING_SNAKE_CASE : Tuple = 65
__SCREAMING_SNAKE_CASE : Optional[Any] = '''mapillary-vistas-id2label.json'''
__SCREAMING_SNAKE_CASE : List[str] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='''dataset''' ) , '''r''' ) )
__SCREAMING_SNAKE_CASE : Any = {int(lowercase__ ): v for k, v in idalabel.items()}
return config
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Any = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
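# Toy illustration of the helper above (not part of the conversion flow; the dict and key names are made up):
#   d = {"old.key": 1}
#   rename_key(d, "old.key", "new.key")
#   assert d == {"new.key": 1}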
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict,
            # using the HF MaskFormer Swin backbone naming (prefix matches the layernorm keys asserted below)
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of the cross-attention to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help='Name of the MaskFormer model you\'d like to convert',
    )
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
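# Example invocation (the script filename and paths below are placeholders, not taken from this repository):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade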
| 260
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_van'''] = [
        '''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''VanForImageClassification''',
        '''VanModel''',
        '''VanPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
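# Minimal usage sketch (assumes torch is installed so the lazy module above resolves the real implementation;
# the top-level import path is illustrative):
#   from transformers import VanConfig, VanModel
#   model = VanModel(VanConfig())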
| 653
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """Output class for Stable Diffusion pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """Output class for Flax-based Stable Diffusion pipelines."""

        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
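# Rough usage sketch for the PyTorch pipeline exported above (the checkpoint id is illustrative, not part of this file):
#   from diffusers import StableDiffusionPipeline
#   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   result = pipe("an astronaut riding a horse")  # returns a StableDiffusionPipelineOutput
#   result.images[0].save("astronaut.png")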
| 61
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__a : Optional[int] = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    """simple docstring"""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self, feature_size=80, sampling_rate=16000, padding_value=0.0, hop_length=10, win_length=25,
        win_function="hamming_window", frame_signal_scale=32768.0, preemphasis_coeff=0.97, mel_floor=1.0,
        normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform) -> np.ndarray:
        """Extract MFSC features for one waveform vector (unbatched)."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel="log", )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        """Normalize one feature array, then right-pad it with `padding_value`."""
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features, attention_mask=None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(self, raw_speech, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, sampling_rate=None, **kwargs, ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase__ : Tuple = isinstance(lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ : Tuple = is_batched_numpy or (
isinstance(lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ : str = [np.asarray(lowerCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase , np.ndarray ):
lowercase__ : Union[str, Any] = np.asarray(lowerCamelCase , dtype=np.floataa )
elif isinstance(lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ : List[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ : int = [raw_speech]
# extract fbank features
lowercase__ : int = [self._extract_mfsc_features(lowerCamelCase ) for one_waveform in raw_speech]
# convert into correct format for padding
lowercase__ : Dict = BatchFeature({"input_features": features} )
lowercase__ : Tuple = self.pad(
lowerCamelCase , padding=lowerCamelCase , max_length=lowerCamelCase , truncation=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , **lowerCamelCase , )
# make sure list is in array format
lowercase__ : Tuple = padded_inputs.get("input_features" )
if isinstance(input_features[0] , lowerCamelCase ):
lowercase__ : Tuple = [np.asarray(lowerCamelCase , dtype=np.floataa ) for feature in input_features]
lowercase__ : Tuple = padded_inputs.get("attention_mask" )
if attention_mask is not None:
lowercase__ : Optional[Any] = [np.asarray(lowerCamelCase , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
lowercase__ : Union[str, Any] = (
np.array(lowerCamelCase , dtype=np.intaa )
if self._get_padding_strategies(lowerCamelCase , max_length=lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
lowercase__ : Optional[Any] = self.normalize(
padded_inputs["input_features"] , attention_mask=lowerCamelCase )
if return_tensors is not None:
lowercase__ : int = padded_inputs.convert_to_tensors(lowerCamelCase )
return padded_inputs
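# Hedged usage sketch for the feature extractor above (argument names and output shape follow the upstream API
# and are assumptions here; any 16 kHz mono waveform works):
#   import numpy as np
#   extractor = MCTCTFeatureExtractor()  # class defined above, default 80 mel features at 16 kHz
#   waveform = np.zeros(16000, dtype=np.float32)  # one second of silence
#   features = extractor(waveform, sampling_rate=16000)
#   print(np.asarray(features["input_features"][0]).shape)  # roughly (num_frames, feature_size)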
| 700
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__a : int = logging.get_logger(__name__)
__a : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__a : Any = {
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
__a : Optional[int] = {'''mobilebert-uncased''': 5_1_2}
__a : Dict = {}
class UpperCAmelCase( snake_case_ ):
"""simple docstring"""
a : Optional[int] = VOCAB_FILES_NAMES
a : Any = PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] = PRETRAINED_INIT_CONFIGURATION
a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Any = MobileBertTokenizer
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase="[UNK]" , lowerCamelCase="[SEP]" , lowerCamelCase="[PAD]" , lowerCamelCase="[CLS]" , lowerCamelCase="[MASK]" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> Dict:
"""simple docstring"""
super().__init__(
lowerCamelCase , tokenizer_file=lowerCamelCase , do_lower_case=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , pad_token=lowerCamelCase , cls_token=lowerCamelCase , mask_token=lowerCamelCase , tokenize_chinese_chars=lowerCamelCase , strip_accents=lowerCamelCase , **lowerCamelCase , )
lowercase__ : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCamelCase ) != tokenize_chinese_chars
):
lowercase__ : List[str] = getattr(lowerCamelCase , normalizer_state.pop("type" ) )
lowercase__ : Dict = do_lower_case
lowercase__ : Dict = strip_accents
lowercase__ : List[Any] = tokenize_chinese_chars
lowercase__ : Any = normalizer_class(**lowerCamelCase )
lowercase__ : Union[str, Any] = do_lower_case
def __a ( self , lowerCamelCase , lowerCamelCase=None ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
lowercase__ : Optional[int] = [self.sep_token_id]
lowercase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
lowercase__ : List[Any] = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
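# Hedged usage sketch (the checkpoint id comes from the vocab map above; the fast tokenizer class defined here
# is called MobileBertTokenizerFast in the upstream library):
#   tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   encoding = tokenizer("hello world", return_token_type_ids=True)
#   print(encoding["input_ids"], encoding["token_type_ids"])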
| 298
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
'''simple docstring'''
    def __init__(
        self,
        transformer: TransformeraDModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(''',''' ):
                    self.labels[label.lstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
for l in label:
if l not in self.labels:
raise ValueError(
f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Tuple , snake_case : List[int] , snake_case : float = 4.0 , snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case : int = 50 , snake_case : Optional[str] = "pil" , snake_case : bool = True , ):
UpperCAmelCase_ :List[Any] = len(snake_case )
UpperCAmelCase_ :List[Any] = self.transformer.config.sample_size
UpperCAmelCase_ :Dict = self.transformer.config.in_channels
UpperCAmelCase_ :Tuple = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=snake_case , device=self.device , dtype=self.transformer.dtype , )
UpperCAmelCase_ :Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
UpperCAmelCase_ :Dict = torch.tensor(snake_case , device=self.device ).reshape(-1 )
UpperCAmelCase_ :Any = torch.tensor([1_000] * batch_size , device=self.device )
UpperCAmelCase_ :Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(snake_case )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
UpperCAmelCase_ :List[str] = latent_model_input[: len(snake_case ) // 2]
UpperCAmelCase_ :Union[str, Any] = torch.cat([half, half] , dim=0 )
UpperCAmelCase_ :List[str] = self.scheduler.scale_model_input(snake_case , snake_case )
UpperCAmelCase_ :List[Any] = t
if not torch.is_tensor(snake_case ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
UpperCAmelCase_ :Dict = latent_model_input.device.type == '''mps'''
if isinstance(snake_case , snake_case ):
UpperCAmelCase_ :Dict = torch.floataa if is_mps else torch.floataa
else:
UpperCAmelCase_ :int = torch.intaa if is_mps else torch.intaa
UpperCAmelCase_ :List[Any] = torch.tensor([timesteps] , dtype=snake_case , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
UpperCAmelCase_ :Optional[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase_ :List[str] = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
UpperCAmelCase_ :Tuple = self.transformer(
snake_case , timestep=snake_case , class_labels=snake_case ).sample
# perform guidance
if guidance_scale > 1:
UpperCAmelCase_ ,UpperCAmelCase_ :Dict = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
UpperCAmelCase_ ,UpperCAmelCase_ :List[Any] = torch.split(snake_case , len(snake_case ) // 2 , dim=0 )
UpperCAmelCase_ :Any = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
UpperCAmelCase_ :str = torch.cat([half_eps, half_eps] , dim=0 )
UpperCAmelCase_ :Any = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
UpperCAmelCase_ ,UpperCAmelCase_ :Optional[Any] = torch.split(snake_case , snake_case , dim=1 )
else:
UpperCAmelCase_ :Optional[Any] = noise_pred
# compute previous image: x_t -> x_t-1
UpperCAmelCase_ :Dict = self.scheduler.step(snake_case , snake_case , snake_case ).prev_sample
if guidance_scale > 1:
UpperCAmelCase_ ,UpperCAmelCase_ :int = latent_model_input.chunk(2 , dim=0 )
else:
UpperCAmelCase_ :str = latent_model_input
UpperCAmelCase_ :List[str] = 1 / self.vae.config.scaling_factor * latents
UpperCAmelCase_ :Optional[int] = self.vae.decode(snake_case ).sample
UpperCAmelCase_ :Any = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase_ :List[Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ :List[str] = self.numpy_to_pil(snake_case )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=snake_case )
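# Hedged usage sketch for the pipeline above (checkpoint id, argument names and step count are illustrative):
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   images = pipe(class_labels=class_ids, num_inference_steps=25).images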
| 608
|
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def a ( ):
'''simple docstring'''
UpperCAmelCase_ :List[str] = torch.nn.Linear(2, 4 )
UpperCAmelCase_ :Optional[int] = torch.optim.AdamW(model.parameters(), lr=1.0 )
UpperCAmelCase_ :List[str] = torch.optim.lr_scheduler.OneCycleLR(__snake_case, max_lr=0.01, steps_per_epoch=2, epochs=1 )
UpperCAmelCase_ :Union[str, Any] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
UpperCAmelCase_ :int = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def a ( __snake_case : int ):
'''simple docstring'''
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def a ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ :Optional[int] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(__snake_case )
class AcceleratorTester(AccelerateTestCase):
'''simple docstring'''
@require_cuda
def snake_case_ ( self : List[Any] ):
UpperCAmelCase_ :List[Any] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(snake_case ):
UpperCAmelCase_ :Dict = Accelerator(cpu=snake_case )
def snake_case_ ( self : Tuple ):
UpperCAmelCase_ :Dict = Accelerator()
UpperCAmelCase_ :Optional[Any] = GradientState()
assert state.num_steps == 1
UpperCAmelCase_ :Dict = 4
assert state.num_steps == 4
assert state.sync_gradients is True
UpperCAmelCase_ :Any = False
assert state.sync_gradients is False
GradientState._reset_state()
def snake_case_ ( self : Tuple ):
UpperCAmelCase_ :Union[str, Any] = Accelerator()
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ :List[str] = create_components()
(
(
UpperCAmelCase_
) ,(
UpperCAmelCase_
) ,(
UpperCAmelCase_
) ,(
UpperCAmelCase_
) ,(
UpperCAmelCase_
) ,
) :Union[str, Any] = accelerator.prepare(snake_case , snake_case , snake_case , snake_case , snake_case )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def snake_case_ ( self : List[Any] ):
UpperCAmelCase_ :Tuple = Accelerator()
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ :str = create_components()
accelerator.prepare(snake_case , snake_case , snake_case , snake_case , snake_case )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def snake_case_ ( self : Dict ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass
with patch('''torch.cuda.set_device''' , snake_case ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
UpperCAmelCase_ :List[str] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def snake_case_ ( self : Union[str, Any] ):
UpperCAmelCase_ :int = Accelerator()
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ :str = create_components()
accelerator.prepare(snake_case , snake_case , snake_case , snake_case , snake_case )
UpperCAmelCase_ :Dict = get_signature(snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(snake_case )
# make sure random weights don't match
load_random_weights(snake_case )
self.assertTrue(abs(model_signature - get_signature(snake_case ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(snake_case )
self.assertTrue(abs(model_signature - get_signature(snake_case ) ) < 1e-3 )
def snake_case_ ( self : str ):
UpperCAmelCase_ :Union[str, Any] = Accelerator()
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ :Tuple = create_components()
accelerator.prepare(snake_case , snake_case , snake_case , snake_case , snake_case )
UpperCAmelCase_ :Union[str, Any] = get_signature(snake_case )
# saving hook
        def save_config(models, weights, output_dir):
            config = {'''class_name''': models[0].__class__.__name__}
            with open(os.path.join(output_dir, '''data.json''') , '''w''' ) as f:
                json.dump(config, f)
        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, '''data.json''') , '''r''' ) as f:
                config = json.load(f)
            models[0].class_name = config['''class_name''']
UpperCAmelCase_ :str = accelerator.register_save_state_pre_hook(snake_case )
UpperCAmelCase_ :Any = accelerator.register_load_state_pre_hook(snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(snake_case )
# make sure random weights don't match with hooks
load_random_weights(snake_case )
self.assertTrue(abs(model_signature - get_signature(snake_case ) ) > 1e-3 )
# random class name to verify correct one is loaded
UpperCAmelCase_ :Optional[Any] = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(snake_case )
self.assertTrue(abs(model_signature - get_signature(snake_case ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(snake_case )
# make sure random weights don't match with hooks removed
load_random_weights(snake_case )
self.assertTrue(abs(model_signature - get_signature(snake_case ) ) > 1e-3 )
# random class name to verify correct one is loaded
UpperCAmelCase_ :Optional[int] = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(snake_case )
self.assertTrue(abs(model_signature - get_signature(snake_case ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def snake_case_ ( self : Tuple ):
UpperCAmelCase_ :str = Accelerator()
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ :Dict = create_components()
UpperCAmelCase_ :int = None
# This should work
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ :List[str] = accelerator.prepare(
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case )
self.assertTrue(dummy_obj is None )
def snake_case_ ( self : List[Any] ):
UpperCAmelCase_ :Any = Accelerator()
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ :str = create_components()
UpperCAmelCase_ :str = [1, 2, 3]
# This should work
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ :int = accelerator.prepare(
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case )
self.assertEqual(
getattr(snake_case , '''_is_accelerate_prepared''' , snake_case ) , snake_case , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(snake_case , '''_is_accelerate_prepared''' , snake_case ) , snake_case , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(snake_case , '''_is_accelerate_prepared''' , snake_case ) , snake_case , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(snake_case , '''_is_accelerate_prepared''' , snake_case ) , snake_case , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(snake_case , '''_is_accelerate_prepared''' , snake_case ) , snake_case , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(snake_case , '''_is_accelerate_prepared''' , snake_case ) , snake_case , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def snake_case_ ( self : Optional[Any] ):
from transformers import AutoModelForCausalLM
UpperCAmelCase_ :Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=snake_case , device_map={'''''': 0} , )
UpperCAmelCase_ :Optional[Any] = Accelerator()
# This should work
UpperCAmelCase_ :Optional[int] = accelerator.prepare(snake_case )
@slow
@require_bnb
def snake_case_ ( self : Union[str, Any] ):
from transformers import AutoModelForCausalLM
UpperCAmelCase_ :Dict = Accelerator()
with init_empty_weights():
UpperCAmelCase_ :Tuple = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
UpperCAmelCase_ :Optional[Any] = infer_auto_device_map(snake_case )
UpperCAmelCase_ :List[Any] = '''cpu'''
UpperCAmelCase_ :Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=snake_case , load_in_abit=snake_case , llm_inta_enable_fpaa_cpu_offload=snake_case )
# This should not work and get value error
with self.assertRaises(snake_case ):
UpperCAmelCase_ :Union[str, Any] = accelerator.prepare(snake_case )
@slow
@require_bnb
@require_multi_gpu
def snake_case_ ( self : Union[str, Any] ):
from transformers import AutoModelForCausalLM
UpperCAmelCase_ :Optional[int] = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
UpperCAmelCase_ :List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
UpperCAmelCase_ :Optional[int] = infer_auto_device_map(snake_case )
UpperCAmelCase_ :Optional[Any] = 1
UpperCAmelCase_ :str = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=snake_case , device_map=snake_case , )
UpperCAmelCase_ :Optional[Any] = Accelerator()
# This should not work and get value error
with self.assertRaises(snake_case ):
UpperCAmelCase_ :Optional[int] = accelerator.prepare(snake_case )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def snake_case_ ( self : str ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
UpperCAmelCase_ :Tuple = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
UpperCAmelCase_ :Union[str, Any] = infer_auto_device_map(snake_case )
UpperCAmelCase_ :List[Any] = 1
UpperCAmelCase_ :Tuple = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=snake_case , device_map=snake_case , )
UpperCAmelCase_ :Tuple = Accelerator()
# This should work
UpperCAmelCase_ :Dict = accelerator.prepare(snake_case )
@require_cuda
def snake_case_ ( self : List[str] ):
UpperCAmelCase_ :Any = torch.nn.Linear(10 , 10 )
UpperCAmelCase_ :Tuple = torch.optim.SGD(model.parameters() , lr=0.01 )
UpperCAmelCase_ :Any = Accelerator(cpu=snake_case )
UpperCAmelCase_ :List[str] = accelerator.prepare(snake_case )
| 608
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''poolformer'''
    def __init__(
        self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0,
        depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0,
        hidden_act="gelu", use_layer_scale=True, layer_scale_init_value=1E-5, initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class _SCREAMING_SNAKE_CASE ( lowercase_ ):
"""simple docstring"""
_a : Dict = version.parse('''1.11''' )
@property
def UpperCAmelCase__( self ) -> str:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase__( self ) -> Optional[Any]:
return 2E-3
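# Quick sketch of how the config above is typically used (default values taken from the signature above):
#   config = PoolFormerConfig()
#   print(config.hidden_sizes)   # [64, 128, 320, 512]
#   print(config.depths)         # [2, 2, 6, 2]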
| 720
|
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10_001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F"{solution() = }")
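# Quick sanity examples for the helpers above:
#   is_prime(13) -> True, is_prime(15) -> False
#   solution(6)  -> 13 (the sixth prime: 2, 3, 5, 7, 11, 13)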
| 128
| 0
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    """simple docstring"""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """simple docstring"""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 534
|
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of all proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError('''Input must be an integer''' )
    if input_num <= 0:
        raise ValueError('''Input must be positive''' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 534
| 1
|
'''simple docstring'''
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class NezhaConfig(PretrainedConfig):
    """simple docstring"""
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = '''nezha'''

    def __init__(
        self, vocab_size=2_11_28, hidden_size=7_68, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12, max_relative_position=64, type_vocab_size=2, initializer_range=0.02,
        layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3,
        use_cache=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
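# Minimal sketch: building the config above with one overridden field (defaults taken from the signature above).
#   config = NezhaConfig(hidden_dropout_prob=0.2)
#   print(config.vocab_size, config.max_relative_position)  # 21128 64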
| 704
|
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
UpperCamelCase_ : Optional[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCamelCase_ : Tuple = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCamelCase_ : Optional[int] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate(item, main_target):
    """Score an item by counting the characters that already match the target position by position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1, parent_2):
    """Slice and combine two parent strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child, genes):
    """With probability MUTATION_PROBABILITY, replace one random character of the child by a random gene."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(parent_1, population_score, genes):
    """Pick second parents from the scored population and generate children for the next generation."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 1_00) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def _lowerCAmelCase (_lowercase , _lowercase , _lowercase = True ):
"""simple docstring"""
if N_POPULATION < N_SELECTED:
a__ = F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_lowercase )
# Verify that the target contains no genes besides the ones inside genes variable.
a__ = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
a__ = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_lowercase )
# Generate random starting population.
a__ = []
for _ in range(_lowercase ):
population.append("".join([random.choice(_lowercase ) for i in range(len(_lowercase ) )] ) )
# Just some logs to know what the algorithms is doing.
a__ , a__ = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_lowercase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
a__ = [evaluate(_lowercase , _lowercase ) for item in population]
# Check if there is a matching evolution.
a__ = sorted(_lowercase , key=lambda _lowercase : x[1] , reverse=_lowercase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
a__ = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_lowercase )
# Normalize population score to be between 0 and 1.
a__ = [
(item, score / len(_lowercase )) for item, score in population_score
]
# This is selection
for i in range(_lowercase ):
population.extend(select(population_score[int(_lowercase )] , _lowercase , _lowercase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(_lowercase ) > N_POPULATION:
break
if __name__ == "__main__":
UpperCamelCase_ : Optional[Any] = (
"""This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
)
UpperCamelCase_ : int = list(
""" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
"""nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
)
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ : Optional[int] = basic(target_str, genes_list)
print(
F"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
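# Small illustration of the helpers above on toy strings (mutate and crossover are random, so outputs vary):
#   print(evaluate("banana", "bananas"))   # ("banana", 6.0), all six positions already match
#   child_1, child_2 = crossover("banana", "ananas")
#   print(mutate(child_1, genes_list))     # may flip one character, depending on MUTATION_PROBABILITY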
| 394
| 0
|
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = DownBlockaD # noqa F405
lowercase_ = "down"
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = ResnetDownsampleBlockaD # noqa F405
lowercase_ = "down"
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = AttnDownBlockaD # noqa F405
lowercase_ = "down"
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = CrossAttnDownBlockaD # noqa F405
lowercase_ = "down"
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 32
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = SimpleCrossAttnDownBlockaD # noqa F405
lowercase_ = "down"
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return super().get_dummy_input(include_encoder_hidden_states=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = SkipDownBlockaD # noqa F405
lowercase_ = "down"
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return super().get_dummy_input(include_skip_sample=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = AttnSkipDownBlockaD # noqa F405
lowercase_ = "down"
@property
def lowerCAmelCase_ ( self : int ):
return super().get_dummy_input(include_skip_sample=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = DownEncoderBlockaD # noqa F405
lowercase_ = "down"
@property
def lowerCAmelCase_ ( self : List[str] ):
return super().get_dummy_input(include_temb=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = {
'in_channels': 32,
'out_channels': 32,
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = AttnDownEncoderBlockaD # noqa F405
lowercase_ = "down"
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return super().get_dummy_input(include_temb=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = {
'in_channels': 32,
'out_channels': 32,
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = UNetMidBlockaD # noqa F405
lowercase_ = "mid"
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = {
'in_channels': 32,
'temb_channels': 128,
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = UNetMidBlockaDCrossAttn # noqa F405
lowercase_ = "mid"
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 32
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowercase_ = "mid"
@property
def lowerCAmelCase_ ( self : List[Any] ):
return super().get_dummy_input(include_encoder_hidden_states=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 32
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = UpBlockaD # noqa F405
lowercase_ = "up"
@property
def lowerCAmelCase_ ( self : List[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = ResnetUpsampleBlockaD # noqa F405
lowercase_ = "up"
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = CrossAttnUpBlockaD # noqa F405
lowercase_ = "up"
@property
def lowerCAmelCase_ ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 32
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = SimpleCrossAttnUpBlockaD # noqa F405
lowercase_ = "up"
@property
def lowerCAmelCase_ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase , include_encoder_hidden_states=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 32
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = AttnUpBlockaD # noqa F405
lowercase_ = "up"
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = SkipUpBlockaD # noqa F405
lowercase_ = "up"
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = AttnSkipUpBlockaD # noqa F405
lowercase_ = "up"
@property
def lowerCAmelCase_ ( self : Dict ):
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = UpDecoderBlockaD # noqa F405
lowercase_ = "up"
@property
def lowerCAmelCase_ ( self : List[str] ):
return super().get_dummy_input(include_temb=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = {'in_channels': 32, 'out_channels': 32}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = AttnUpDecoderBlockaD # noqa F405
lowercase_ = "up"
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return super().get_dummy_input(include_temb=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = {'in_channels': 32, 'out_channels': 32}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(_lowerCAmelCase )
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__)
class __lowerCamelCase ( __lowercase ):
def __init__(self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ):
'''simple docstring'''
super().__init__(
lowerCamelCase , question_encoder_tokenizer=lowerCamelCase , generator_tokenizer=lowerCamelCase , index=lowerCamelCase , init_retrieval=lowerCamelCase , )
_lowerCAmelCase = None
def A__ (self , lowerCamelCase ):
'''simple docstring'''
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
_lowerCAmelCase = self._infer_socket_ifname()
# avoid clash with the NCCL port
_lowerCAmelCase = str(distributed_port + 1 )
_lowerCAmelCase = dist.new_group(ranks=lowerCamelCase , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def A__ (self ):
'''simple docstring'''
return dist.get_rank(group=self.process_group ) == 0
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase=torch.floataa ):
'''simple docstring'''
_lowerCAmelCase = torch.empty(lowerCamelCase , dtype=lowerCamelCase )
dist.scatter(lowerCamelCase , src=0 , scatter_list=lowerCamelCase , group=self.process_group )
return target_tensor
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_lowerCAmelCase = next((addr for addr in addrs if addr.startswith("""e""" )) , lowerCamelCase )
return ifname
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not dist.is_initialized():
_lowerCAmelCase , _lowerCAmelCase = self._main_retrieve(lowerCamelCase , lowerCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowerCamelCase )
# distributed training
_lowerCAmelCase = dist.get_world_size(group=self.process_group )
# gather logic
_lowerCAmelCase = None
if self._is_main():
_lowerCAmelCase = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(lowerCamelCase )]
dist.gather(torch.tensor(lowerCamelCase ) , dst=0 , gather_list=lowerCamelCase , group=self.process_group )
# scatter logic
_lowerCAmelCase = question_hidden_states.shape[0]
_lowerCAmelCase = []
_lowerCAmelCase = []
if self._is_main():
assert len(lowerCamelCase ) == world_size
_lowerCAmelCase , _lowerCAmelCase = self._main_retrieve(torch.cat(lowerCamelCase ).numpy() , lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase = torch.tensor(lowerCamelCase ), torch.tensor(lowerCamelCase )
_lowerCAmelCase = self._chunk_tensor(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = self._chunk_tensor(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = self._scattered(lowerCamelCase , [n_queries, n_docs] , target_type=torch.intaa )
_lowerCAmelCase = self._scattered(lowerCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(lowerCamelCase )
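# A minimal, single-process sketch of the gather -> retrieve -> scatter flow that the
# `retrieve` method above implements with torch.distributed; `fake_retrieve` and
# `chunk_tensor` are hypothetical stand-ins for self._main_retrieve and self._chunk_tensor.
def chunk_tensor(t, num_chunks):
    # split along dim 0 into `num_chunks` roughly equal pieces
    return list(torch.chunk(t, num_chunks, dim=0))
def fake_retrieve(question_hidden_states, n_docs):
    # stand-in for the index lookup: returns (doc_embeds, doc_ids) for every query
    n_queries, dim = question_hidden_states.shape
    return torch.zeros(n_queries, n_docs, dim), torch.zeros(n_queries, n_docs, dtype=torch.int64)
if __name__ == "__main__":
    # each "worker" contributes one batch of query embeddings
    per_worker_queries = [torch.randn(2, 8) for _ in range(3)]
    # the main worker gathers all batches, retrieves once, then chunks the results per worker
    doc_embeds, doc_ids = fake_retrieve(torch.cat(per_worker_queries), n_docs=5)
    scattered_embeds = chunk_tensor(doc_embeds, len(per_worker_queries))
    scattered_ids = chunk_tensor(doc_ids, len(per_worker_queries))
    assert scattered_embeds[0].shape == (2, 5, 8) and scattered_ids[0].shape == (2, 5)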
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = mock.Mock()
_lowerCamelCase = 5_0_0
_lowerCamelCase = {}
_lowerCamelCase = HTTPError
_lowerCamelCase = {}
# Download this model to make sure it's in the cache.
_lowerCamelCase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=lowerCamelCase__ ) as mock_head:
_lowerCamelCase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def snake_case__ ( self ):
_lowerCamelCase = mock.Mock()
_lowerCamelCase = 5_0_0
_lowerCamelCase = {}
_lowerCamelCase = HTTPError
_lowerCamelCase = {}
# Download this model to make sure it's in the cache.
_lowerCamelCase = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=lowerCamelCase__ ) as mock_head:
_lowerCamelCase = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case__ ( self ):
try:
_lowerCamelCase = tempfile.mktemp()
with open(lowerCamelCase__ , '''wb''' ) as f:
http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , lowerCamelCase__ )
_lowerCamelCase = AlbertTokenizer.from_pretrained(lowerCamelCase__ )
finally:
os.remove(lowerCamelCase__ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json''' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''' , '''wb''' ) as f:
http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , lowerCamelCase__ )
_lowerCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_0_0_0 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''' )
def snake_case__ ( self ):
_lowerCamelCase = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' )
@is_staging_test
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def snake_case__ ( cls ):
_lowerCamelCase = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def snake_case__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-tokenizer''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''' )
except HTTPError:
pass
def snake_case__ ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''vocab.txt''' )
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
_lowerCamelCase = BertTokenizer(lowerCamelCase__ )
tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token )
_lowerCamelCase = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''test-tokenizer''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase__ , repo_id='''test-tokenizer''' , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_lowerCamelCase = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def snake_case__ ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''vocab.txt''' )
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
_lowerCamelCase = BertTokenizer(lowerCamelCase__ )
tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token )
_lowerCamelCase = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase__ , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_lowerCamelCase = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def snake_case__ ( self ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''vocab.txt''' )
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
_lowerCamelCase = CustomTokenizer(lowerCamelCase__ )
# No fast custom tokenizer
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
_lowerCamelCase = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''vocab.txt''' )
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
_lowerCamelCase = BertTokenizerFast.from_pretrained(lowerCamelCase__ )
bert_tokenizer.save_pretrained(lowerCamelCase__ )
_lowerCamelCase = CustomTokenizerFast.from_pretrained(lowerCamelCase__ )
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
_lowerCamelCase = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''' )
_lowerCamelCase = AutoTokenizer.from_pretrained(
F"""{USER}/test-dynamic-tokenizer""" , use_fast=lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = Trie()
trie.add('''Hello 友達''' )
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
trie.add('''Hello''' )
trie.data
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
def snake_case__ ( self ):
_lowerCamelCase = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This is a extra_id_100'''] )
trie.add('''[CLS]''' )
trie.add('''extra_id_1''' )
trie.add('''extra_id_100''' )
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] )
def snake_case__ ( self ):
_lowerCamelCase = Trie()
trie.add('''A''' )
self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] )
self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] )
def snake_case__ ( self ):
_lowerCamelCase = Trie()
trie.add('''TOKEN]''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def snake_case__ ( self ):
_lowerCamelCase = Trie()
trie.add('''A''' )
trie.add('''P''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def snake_case__ ( self ):
_lowerCamelCase = Trie()
trie.add('''AB''' )
trie.add('''B''' )
trie.add('''C''' )
self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] )
def snake_case__ ( self ):
_lowerCamelCase = Trie()
trie.add('''ABC''' )
trie.add('''B''' )
trie.add('''CD''' )
self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] )
def snake_case__ ( self ):
_lowerCamelCase = Trie()
_lowerCamelCase = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCamelCase__ , ['''AB''', '''C'''] )
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
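# A self-contained sketch of the same optional-dependency guard used above; the names
# `_HAS_HEAVY_DEP`, `_OptionalDependencyNotAvailable` and `HeavyPipeline` are illustrative
# placeholders, not diffusers APIs.
class _OptionalDependencyNotAvailable(Exception):
    pass
_HAS_HEAVY_DEP = False  # pretend the heavy dependency is missing
try:
    if not _HAS_HEAVY_DEP:
        raise _OptionalDependencyNotAvailable()
except _OptionalDependencyNotAvailable:
    class HeavyPipeline:  # dummy object that only fails when actually instantiated
        def __init__(self, *args, **kwargs):
            raise ImportError("HeavyPipeline requires the optional heavy dependency.")
else:
    pass  # the real import would go here, e.g. `from .pipeline_heavy import HeavyPipeline`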
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Tuple = logging.get_logger()
@dataclass
class _lowercase :
_a : nn.Module
_a : List[nn.Module] = field(default_factory=_A )
_a : list = field(default_factory=_A )
def lowercase__ ( self , a , a , a ):
snake_case__ : Tuple =len(list(m.modules() ) ) == 1 or isinstance(a , nn.Convad ) or isinstance(a , nn.BatchNormad )
if has_not_submodules:
self.traced.append(a )
def __call__( self , a ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(a )
[x.remove() for x in self.handles]
return self
@property
def lowercase__ ( self ):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda a : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class _lowercase :
_a : nn.Module
_a : nn.Module
_a : int = 1
_a : List = field(default_factory=_A )
_a : List = field(default_factory=_A )
_a : bool = True
def __call__( self , a ):
snake_case__ : Any =Tracker(self.dest )(a ).parametrized
snake_case__ : Optional[int] =Tracker(self.src )(a ).parametrized
snake_case__ : str =list(filter(lambda a : type(a ) not in self.src_skip , a ) )
snake_case__ : Any =list(filter(lambda a : type(a ) not in self.dest_skip , a ) )
if len(a ) != len(a ) and self.raise_if_mismatch:
raise Exception(
F"Numbers of operations are different. Source module has {len(a )} operations while"
F" destination module has {len(a )}." )
for dest_m, src_m in zip(a , a ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"Transfered from={src_m} to={dest_m}" )
class _lowercase ( nn.Module ):
def __init__( self , a ):
super().__init__()
snake_case__ : List[Tuple[str, nn.Module]] =[]
# - get the stem
feature_blocks.append(("""conv1""", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("""block""" ), F"Unexpected layer name {k}"
snake_case__ : Optional[int] =len(a ) + 1
feature_blocks.append((F"res{block_index}", v) )
snake_case__ : Dict =nn.ModuleDict(a )
def lowercase__ ( self , a ):
return get_trunk_forward_outputs(
a , out_feat_keys=a , feature_blocks=self._feature_blocks , )
class _lowercase ( _A ):
def lowercase__ ( self , a ):
snake_case__ : Optional[Any] =x.split("""-""" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self , a ):
# default to timm!
if x not in self:
snake_case__ : Optional[int] =self.convert_name_to_timm(a )
snake_case__ : int =partial(lambda: (timm.create_model(a , pretrained=a ).eval(), None) )
else:
snake_case__ : Optional[Any] =super().__getitem__(a )
return val
class _lowercase ( _A ):
def __getitem__( self , a ):
if "seer" in x and "in1k" not in x:
snake_case__ : Dict =RegNetModel
else:
snake_case__ : Optional[int] =RegNetForImageClassification
return val
def A__ ( _a : str , _a : int , _a : List[Tuple[str, str]] ):
'''simple docstring'''
for from_key, to_key in keys:
snake_case__ : Dict =from_state_dict[from_key].clone()
print(f"Copied key={from_key} to={to_key}" )
return to_state_dict
def A__ ( _a : str , _a : Callable[[], nn.Module] , _a : Callable[[], nn.Module] , _a : RegNetConfig , _a : Path , _a : bool = True , ):
'''simple docstring'''
print(f"Converting {name}..." )
with torch.no_grad():
snake_case__ , snake_case__ : str =from_model_func()
snake_case__ : Any =our_model_func(_a ).eval()
snake_case__ : Dict =ModuleTransfer(src=_a , dest=_a , raise_if_mismatch=_a )
snake_case__ : List[Any] =torch.randn((1, 3, 224, 224) )
module_transfer(_a )
if from_state_dict is not None:
snake_case__ : Any =[]
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
snake_case__ : List[str] =[("""0.clf.0.weight""", """classifier.1.weight"""), ("""0.clf.0.bias""", """classifier.1.bias""")]
snake_case__ : Optional[Any] =manually_copy_vissl_head(_a , our_model.state_dict() , _a )
our_model.load_state_dict(_a )
snake_case__ : Any =our_model(_a , output_hidden_states=_a )
snake_case__ : str =(
our_outputs.logits if isinstance(_a , _a ) else our_outputs.last_hidden_state
)
snake_case__ : Dict =from_model(_a )
snake_case__ : List[Any] =from_output[-1] if type(_a ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
snake_case__ : Optional[int] =our_outputs.hidden_states[-1]
assert torch.allclose(_a , _a ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="""Add model""" , use_temp_dir=_a , )
snake_case__ : Union[str, Any] =224 if """seer""" not in name else 384
# we can use the convnext one
snake_case__ : str =AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" , size=_a )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="""Add image processor""" , use_temp_dir=_a , )
print(f"Pushed {name}" )
def A__ ( _a : Path , _a : str = None , _a : bool = True ):
'''simple docstring'''
snake_case__ : List[Any] ="""imagenet-1k-id2label.json"""
snake_case__ : List[str] =1000
snake_case__ : Any =(1, num_labels)
snake_case__ : int ="""huggingface/label-files"""
snake_case__ : Any =num_labels
snake_case__ : Any =json.load(open(cached_download(hf_hub_url(_a , _a , repo_type="""dataset""" ) ) , """r""" ) )
snake_case__ : Optional[int] ={int(_a ): v for k, v in idalabel.items()}
snake_case__ : Dict =idalabel
snake_case__ : Dict ={v: k for k, v in idalabel.items()}
snake_case__ : Dict =partial(_a , num_labels=_a , idalabel=_a , labelaid=_a )
snake_case__ : Any ={
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="""x""" ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="""x""" ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="""x""" ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="""x""" ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="""x""" ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="""x""" ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="""x""" ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="""x""" ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
snake_case__ : Any =NameToOurModelFuncMap()
snake_case__ : List[str] =NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(_a : str , _a : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
snake_case__ : Tuple =torch.hub.load_state_dict_from_url(_a , model_dir=str(_a ) , map_location="""cpu""" )
snake_case__ : Dict =model_func()
# check if we have a head, if yes add it
snake_case__ : Any =files["""classy_state_dict"""]["""base_model"""]["""model"""]
snake_case__ : Optional[Any] =model_state_dict["""trunk"""]
model.load_state_dict(_a )
return model.eval(), model_state_dict["heads"]
# pretrained
snake_case__ : int =partial(
_a , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
snake_case__ : Optional[int] =partial(
_a , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
snake_case__ : List[Any] =partial(
_a , """https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
snake_case__ : Union[str, Any] =partial(
_a , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch""" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
snake_case__ : List[Any] =partial(
_a , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
snake_case__ : Union[str, Any] =partial(
_a , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
snake_case__ : Union[str, Any] =partial(
_a , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
snake_case__ : Optional[Any] =partial(
_a , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch""" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
_a , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _a , _a , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
_a , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _a , _a , _a , )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
__lowerCamelCase : Any = parser.parse_args()
__lowerCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
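# Example invocation, assuming this script is saved as convert_regnet_to_pytorch.py (the file
# name is an assumption); --model_name must be one of the keys of `names_to_config` above:
#   python convert_regnet_to_pytorch.py --model_name regnet-y-040 --pytorch_dump_folder_path ./converted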
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__lowerCamelCase : List[str] = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
__lowerCamelCase : Optional[Any] = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
__lowerCamelCase : List[str] = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
__lowerCamelCase : Tuple = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
__lowerCamelCase : List[Any] = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
__lowerCamelCase : Any = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
__lowerCamelCase : Tuple = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def A__ ( ):
'''simple docstring'''
snake_case__ , snake_case__ : Any =randrange(len(_a ) ), randrange(len(_a ) )
snake_case__ : Tuple =["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
snake_case__ , snake_case__ : Union[str, Any] =SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def A__ ( _a : int = 100 ):
'''simple docstring'''
return (generate_random_hand() for _ in range(_a ))
@pytest.mark.parametrize("""hand, expected""" , _a )
def A__ ( _a : List[Any] , _a : Any ):
'''simple docstring'''
assert PokerHand(_a )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , _a )
def A__ ( _a : Union[str, Any] , _a : int ):
'''simple docstring'''
assert PokerHand(_a )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , _a )
def A__ ( _a : Optional[int] , _a : Tuple , _a : Tuple ):
'''simple docstring'''
snake_case__ : Any =PokerHand(_a )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , _a )
def A__ ( _a : Any , _a : Tuple ):
'''simple docstring'''
assert PokerHand(_a )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , _a )
def A__ ( _a : Union[str, Any] , _a : Tuple ):
'''simple docstring'''
assert PokerHand(_a )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , _a )
def A__ ( _a : str , _a : Tuple , _a : Union[str, Any] ):
'''simple docstring'''
assert PokerHand(_a ).compare_with(PokerHand(_a ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def A__ ( _a : Any , _a : Optional[Any] , _a : str ):
'''simple docstring'''
assert PokerHand(_a ).compare_with(PokerHand(_a ) ) == expected
def A__ ( ):
'''simple docstring'''
snake_case__ : str =[PokerHand(_a ) for hand in SORTED_HANDS]
snake_case__ : List[str] =poker_hands.copy()
shuffle(_a )
snake_case__ : Any =chain(sorted(_a ) )
for index, hand in enumerate(_a ):
assert hand == poker_hands[index]
def A__ ( ):
'''simple docstring'''
snake_case__ : Tuple =[PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=_a )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def A__ ( ):
'''simple docstring'''
snake_case__ : Optional[int] =PokerHand("""2C 4S AS 3D 5C""" )
snake_case__ : Optional[Any] =True
snake_case__ : Any =[5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def A__ ( ):
'''simple docstring'''
snake_case__ : Tuple =0
snake_case__ : int =os.path.abspath(os.path.dirname(_a ) )
snake_case__ : List[Any] =os.path.join(_a , """poker_hands.txt""" )
with open(_a ) as file_hand:
for line in file_hand:
snake_case__ : List[Any] =line[:14].strip()
snake_case__ : Any =line[15:].strip()
snake_case__ , snake_case__ : str =PokerHand(_a ), PokerHand(_a )
snake_case__ : Optional[Any] =player.compare_with(_a )
if output == "Win":
answer += 1
assert answer == 376
__UpperCAmelCase : int = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
__UpperCAmelCase : List[str] = ["a", "b", "c", "d", "e"]
def lowercase_ ( __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Tuple ) -> Optional[int]:
'''simple docstring'''
snake_case__ :List[Any] = start
# add current to visited
visited.append(__snake_case )
snake_case__ :List[str] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case )
# if all neighbors visited add current to sort
sort.append(__snake_case )
# if all vertices haven't been visited select a new one to visit
if len(__snake_case ) != len(__snake_case ):
for vertice in vertices:
if vertice not in visited:
snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case )
# return sort
return sort
if __name__ == "__main__":
__UpperCAmelCase : Tuple = topological_sort("a", [], [])
print(sort)
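# The sample run above prints ['c', 'd', 'e', 'b', 'a']: each vertex is emitted only after
# all of its outgoing neighbors in `edges` have been emitted.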
def lucas_lehmer_test(p: int) -> bool:
    '''simple docstring'''
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
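# The Lucas-Lehmer test decides primality of the Mersenne number 2**p - 1 for an odd prime p:
# lucas_lehmer_test(7) is True because 2**7 - 1 = 127 is prime, while
# lucas_lehmer_test(11) is False because 2**11 - 1 = 2047 = 23 * 89.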
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False ) -> Dict:
UpperCAmelCase_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
for i in range(config.num_hidden_layers ):
UpperCAmelCase_ = "vilt."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
UpperCAmelCase_ = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ = in_proj_bias[-config.hidden_size :]
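# A small standalone illustration of the slicing above: a fused (3*H, H) qkv weight matrix is
# cut row-wise into three (H, H) blocks for query, key and value. The helper name is illustrative only.
def _split_qkv_example(hidden_size=4):
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    q = qkv[:hidden_size, :]
    k = qkv[hidden_size : hidden_size * 2, :]
    v = qkv[-hidden_size:, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)
    return q, k, v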
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Dict:
UpperCAmelCase_ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
UpperCAmelCase_ = dct.pop(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = val
@torch.no_grad()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
if "vqa" in checkpoint_url:
UpperCAmelCase_ = True
UpperCAmelCase_ = 3129
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "vqa2-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = ViltForQuestionAnswering(__SCREAMING_SNAKE_CASE )
elif "nlvr" in checkpoint_url:
UpperCAmelCase_ = True
UpperCAmelCase_ = 2
UpperCAmelCase_ = {0: "False", 1: "True"}
UpperCAmelCase_ = {v: k for k, v in config.idalabel.items()}
UpperCAmelCase_ = 3
UpperCAmelCase_ = ViltForImagesAndTextClassification(__SCREAMING_SNAKE_CASE )
elif "irtr" in checkpoint_url:
UpperCAmelCase_ = True
UpperCAmelCase_ = ViltForImageAndTextRetrieval(__SCREAMING_SNAKE_CASE )
elif "mlm_itm" in checkpoint_url:
UpperCAmelCase_ = True
UpperCAmelCase_ = ViltForMaskedLM(__SCREAMING_SNAKE_CASE )
else:
raise ValueError("Unknown model type" )
# load state_dict of original model, remove and rename some keys
UpperCAmelCase_ = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location="cpu" )["state_dict"]
UpperCAmelCase_ = create_rename_keys(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if mlm_model or irtr_model:
UpperCAmelCase_ = ["itm_score.fc.weight", "itm_score.fc.bias"]
for k in ignore_keys:
state_dict.pop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
UpperCAmelCase_ , UpperCAmelCase_ = model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# Define processor
UpperCAmelCase_ = ViltImageProcessor(size=384 )
UpperCAmelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" )
UpperCAmelCase_ = ViltProcessor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Forward pass on example inputs (image + text)
if nlvr_model:
UpperCAmelCase_ = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=__SCREAMING_SNAKE_CASE ).raw )
UpperCAmelCase_ = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=__SCREAMING_SNAKE_CASE ).raw )
UpperCAmelCase_ = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
UpperCAmelCase_ = processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors="pt" )
UpperCAmelCase_ = processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors="pt" )
UpperCAmelCase_ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
UpperCAmelCase_ = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=__SCREAMING_SNAKE_CASE ).raw )
if mlm_model:
UpperCAmelCase_ = "a bunch of [MASK] laying on a [MASK]."
else:
UpperCAmelCase_ = "How many cats are there?"
UpperCAmelCase_ = processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors="pt" )
UpperCAmelCase_ = model(**__SCREAMING_SNAKE_CASE )
# Verify outputs
if mlm_model:
UpperCAmelCase_ = torch.Size([1, 11, 3_0522] )
UpperCAmelCase_ = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
# verify masked token prediction equals "cats"
UpperCAmelCase_ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
UpperCAmelCase_ = torch.Size([1, 3129] )
UpperCAmelCase_ = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
assert torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
UpperCAmelCase_ = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
UpperCAmelCase_ = torch.Size([1, 2] )
UpperCAmelCase_ = torch.tensor([-2.8_721, 2.1_291] )
assert torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
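# Example invocation, assuming this script is saved as convert_vilt_original_to_pytorch.py (the
# file name and output folder are assumptions):
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-converted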
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , UpperCamelCase__ , )
class _UpperCamelCase (UpperCamelCase__ ):
snake_case_ = RobertaConfig
snake_case_ = """roberta"""
def __init__( self , __UpperCamelCase )-> int:
super().__init__(_a )
__lowerCAmelCase = RobertaEmbeddings(_a )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. """ , UpperCamelCase__ , )
class _UpperCamelCase (UpperCamelCase__ ):
snake_case_ = RobertaConfig
snake_case_ = """roberta"""
def __init__( self , __UpperCamelCase )-> Any:
super().__init__(_a )
__lowerCAmelCase = config.num_labels
__lowerCAmelCase = config.num_hidden_layers
__lowerCAmelCase = DeeRobertaModel(_a )
__lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob )
__lowerCAmelCase = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_a )
def __UpperCAmelCase ( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=-1 , __UpperCamelCase=False , )-> List[str]:
__lowerCAmelCase = self.num_layers
try:
__lowerCAmelCase = self.roberta(
_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , )
__lowerCAmelCase = outputs[1]
__lowerCAmelCase = self.dropout(_a )
__lowerCAmelCase = self.classifier(_a )
__lowerCAmelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__lowerCAmelCase = e.message
__lowerCAmelCase = e.exit_layer
__lowerCAmelCase = outputs[0]
if not self.training:
__lowerCAmelCase = entropy(_a )
__lowerCAmelCase = []
__lowerCAmelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__lowerCAmelCase = MSELoss()
__lowerCAmelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCAmelCase = CrossEntropyLoss()
__lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
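            # Each intermediate ("highway") exit has its own classifier head: its logits get a loss of
            # their own below, and when train_highway is set those losses (excluding the final exit)
            # are summed so that the early-exit classifiers are the ones being trained.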
__lowerCAmelCase = []
for highway_exit in outputs[-1]:
__lowerCAmelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(_a )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__lowerCAmelCase = MSELoss()
__lowerCAmelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCAmelCase = CrossEntropyLoss()
__lowerCAmelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_a )
if train_highway:
__lowerCAmelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__lowerCAmelCase = (loss,) + outputs
if not self.training:
__lowerCAmelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__lowerCAmelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 710
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
lowerCamelCase : int = 1.0_5457_1817E-34 # unit of ℏ : J * s
lowerCamelCase : Union[str, Any] = 3E8 # unit of c : m * s^-1
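# The idealized Casimir relation tying the three quantities together is
#   force = (pi**2 * REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * area) / (240 * distance**4);
# the function below solves this relation for whichever one of force, area or distance is given as 0.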
def __lowerCAmelCase ( force: float , area: float , distance: float ) -> dict[str, float]:
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
return {"force": force}
elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
return {"area": area}
elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290
| 0
|
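# A series is arithmetic when the difference between consecutive terms is constant
# (e.g. [2, 4, 6] has common difference 2); its mean is simply sum(series) / len(series),
# which for [2, 4, 6] is 4.0.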
def _UpperCamelCase ( lowerCAmelCase_ ) ->bool:
    if not isinstance(lowerCAmelCase_ , list ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(lowerCAmelCase_ ) == 0:
raise ValueError("""Input list must be a non empty list""" )
if len(lowerCAmelCase_ ) == 1:
return True
UpperCAmelCase = series[1] - series[0]
for index in range(len(lowerCAmelCase_ ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def _UpperCamelCase ( lowerCAmelCase_ ) ->float:
    if not isinstance(lowerCAmelCase_ , list ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(lowerCAmelCase_ ) == 0:
raise ValueError("""Input list must be a non empty list""" )
UpperCAmelCase = 0
for val in series:
answer += val
return answer / len(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 377
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
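# The mapping below only declares which symbols each submodule exposes; _LazyModule defers the
# real imports until an attribute is first accessed, and the try/except guards drop entries whose
# optional backends (sentencepiece, tokenizers, torch) are not installed.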
__a = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_reformer"""] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 377
| 1
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
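# The helpers below rename parameters from the original MAE checkpoints into the ViTMAEForPreTraining
# naming scheme and split the fused qkv projections, before the conversion function loads the weights
# and sanity-checks the resulting model on a sample image.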
def _lowerCAmelCase ( _lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if "cls_token" in name:
__snake_case = name.replace("cls_token" , "vit.embeddings.cls_token" )
if "mask_token" in name:
__snake_case = name.replace("mask_token" , "decoder.mask_token" )
if "decoder_pos_embed" in name:
__snake_case = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
__snake_case = name.replace("pos_embed" , "vit.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
__snake_case = name.replace("patch_embed.proj" , "vit.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__snake_case = name.replace("patch_embed.norm" , "vit.embeddings.norm" )
if "decoder_blocks" in name:
__snake_case = name.replace("decoder_blocks" , "decoder.decoder_layers" )
if "blocks" in name:
__snake_case = name.replace("blocks" , "vit.encoder.layer" )
if "attn.proj" in name:
__snake_case = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
__snake_case = name.replace("attn" , "attention.self" )
if "norm1" in name:
__snake_case = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__snake_case = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__snake_case = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__snake_case = name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
__snake_case = name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
__snake_case = name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
__snake_case = name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name:
__snake_case = name.replace("norm.weight" , "vit.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name:
__snake_case = name.replace("norm.bias" , "vit.layernorm.bias" )
return name
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> str:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(_lowerCAmelCase )
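        # The original checkpoints store query, key and value as a single fused "qkv" matrix;
        # it is split into three equal chunks for the separate HF projection layers below.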
if "qkv" in key:
__snake_case = key.split("." )
__snake_case = int(key_split[1] )
if "decoder_blocks" in key:
__snake_case = config.decoder_hidden_size
__snake_case = "decoder.decoder_layers."
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
elif "bias" in key:
__snake_case = val[:dim]
__snake_case = val[dim : dim * 2]
__snake_case = val[-dim:]
else:
__snake_case = config.hidden_size
__snake_case = "vit.encoder.layer."
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
elif "bias" in key:
__snake_case = val[:dim]
__snake_case = val[dim : dim * 2]
__snake_case = val[-dim:]
else:
__snake_case = val
return orig_state_dict
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
__snake_case = ViTMAEConfig()
if "large" in checkpoint_url:
__snake_case = 1024
__snake_case = 4096
__snake_case = 24
__snake_case = 16
elif "huge" in checkpoint_url:
__snake_case = 14
__snake_case = 1280
__snake_case = 5120
__snake_case = 32
__snake_case = 16
__snake_case = ViTMAEForPreTraining(_lowerCAmelCase )
__snake_case = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" )["model"]
__snake_case = ViTMAEImageProcessor(size=config.image_size )
__snake_case = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
__snake_case = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
__snake_case = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
__snake_case = ViTMAEImageProcessor(size=config.image_size )
__snake_case = image_processor(images=_lowerCAmelCase , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
__snake_case = model(**_lowerCAmelCase )
__snake_case = outputs.logits
if "large" in checkpoint_url:
__snake_case = torch.tensor(
[[-0.73_09, -0.71_28, -1.01_69], [-1.01_61, -0.90_58, -1.18_78], [-1.04_78, -0.94_11, -1.19_11]] )
elif "huge" in checkpoint_url:
__snake_case = torch.tensor(
[[-1.15_99, -0.91_99, -1.22_21], [-1.19_52, -0.92_69, -1.23_07], [-1.21_43, -0.93_37, -1.22_62]] )
else:
__snake_case = torch.tensor(
[[-0.91_92, -0.84_81, -1.12_59], [-1.13_49, -1.00_34, -1.25_99], [-1.17_57, -1.04_29, -1.27_26]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
A : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
A : Optional[int] = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 718
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCamelCase( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
__snake_case = 1
__snake_case = 3
__snake_case = (3_2, 3_2)
__snake_case = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE )
return image
@property
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
def extract(*SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : List[Any] ):
class UpperCamelCase:
def __init__( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = torch.ones([0] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[Any]:
'''simple docstring'''
self.pixel_values.to(SCREAMING_SNAKE_CASE )
return self
return Out()
return extract
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Dict:
'''simple docstring'''
__snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case = self.dummy_cond_unet
__snake_case = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , )
__snake_case = self.dummy_vae
__snake_case = self.dummy_text_encoder
__snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
__snake_case = StableDiffusionPipeline(
unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
__snake_case = sd_pipe.to(SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
__snake_case = "A painting of a squirrel eating a burger"
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(0 )
__snake_case = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
__snake_case = output.images
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(0 )
__snake_case = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=SCREAMING_SNAKE_CASE , )[0]
__snake_case = image[0, -3:, -3:, -1]
__snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__snake_case = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Tuple:
'''simple docstring'''
__snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case = self.dummy_cond_unet
__snake_case = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE )
__snake_case = self.dummy_vae
__snake_case = self.dummy_text_encoder
__snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
__snake_case = StableDiffusionPipeline(
unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
__snake_case = sd_pipe.to(SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
__snake_case = "A painting of a squirrel eating a burger"
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(0 )
__snake_case = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
__snake_case = output.images
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(0 )
__snake_case = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=SCREAMING_SNAKE_CASE , )[0]
__snake_case = image[0, -3:, -3:, -1]
__snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__snake_case = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
__snake_case = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=SCREAMING_SNAKE_CASE )
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert isinstance(pipe.scheduler , SCREAMING_SNAKE_CASE )
assert pipe.safety_checker is None
__snake_case = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE )
__snake_case = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__snake_case = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
__snake_case = self.dummy_cond_unet
__snake_case = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE )
__snake_case = self.dummy_vae
__snake_case = self.dummy_text_encoder
__snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
__snake_case = unet.half()
__snake_case = vae.half()
__snake_case = bert.half()
# make sure here that pndm scheduler skips prk
__snake_case = StableDiffusionPipeline(
unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
__snake_case = sd_pipe.to(SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
__snake_case = "A painting of a squirrel eating a burger"
__snake_case = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class UpperCamelCase( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=SCREAMING_SNAKE_CASE )
__snake_case = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__snake_case = sd_pipe.to(SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
__snake_case = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
__snake_case = 4_0_0_3_6_6_0_3_4_6
__snake_case = 7
# without safety guidance (sld_guidance_scale = 0)
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE )
__snake_case = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__snake_case = output.images
__snake_case = image[0, -3:, -3:, -1]
__snake_case = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE )
__snake_case = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__snake_case = output.images
__snake_case = image[0, -3:, -3:, -1]
__snake_case = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
__snake_case = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=SCREAMING_SNAKE_CASE )
__snake_case = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__snake_case = sd_pipe.to(SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
__snake_case = "padme amidala taking a bath artwork, safe for work, no nudity"
__snake_case = 2_7_3_4_9_7_1_7_5_5
__snake_case = 7
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE )
__snake_case = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__snake_case = output.images
__snake_case = image[0, -3:, -3:, -1]
__snake_case = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE )
__snake_case = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__snake_case = output.images
__snake_case = image[0, -3:, -3:, -1]
__snake_case = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> int:
'''simple docstring'''
__snake_case = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
__snake_case = sd_pipe.to(SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
__snake_case = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
__snake_case = 1_0_4_4_3_5_5_2_3_4
__snake_case = 1_2
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE )
__snake_case = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__snake_case = output.images
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE )
__snake_case = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__snake_case = output.images
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 473
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tapas'] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_tapas'] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 118
|
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__a :Any = logging.get_logger(__name__)
__a :int = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
__a :Tuple = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
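# MAPPING translates fairseq parameter-name prefixes into their Transformers WavLM counterparts;
# the "*" placeholder in a value is filled in with the encoder layer index during conversion.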
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
for attribute in key.split("." ):
A_ = getattr(__UpperCamelCase ,__UpperCamelCase )
if weight_type is not None:
A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape
else:
A_ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A_ = value
elif weight_type == "weight_g":
A_ = value
elif weight_type == "weight_v":
A_ = value
elif weight_type == "bias":
A_ = value
else:
A_ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = []
A_ = fairseq_model.state_dict()
A_ = hf_model.feature_extractor
for name, value in fairseq_dict.items():
A_ = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,)
A_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A_ = True
if "*" in mapped_key:
A_ = name.split(__UpperCamelCase )[0].split("." )[-2]
A_ = mapped_key.replace("*" ,__UpperCamelCase )
if "weight_g" in name:
A_ = "weight_g"
elif "weight_v" in name:
A_ = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
A_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A_ = "weight"
else:
A_ = None
set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = full_name.split("conv_layers." )[-1]
A_ = name.split("." )
A_ = int(items[0] )
A_ = int(items[1] )
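    # type_id 0 addresses the convolution itself; type_id 2 addresses its layer norm, which is loaded
    # for every layer when group norm is not used, or only for layer 0 when it is.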
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__UpperCamelCase )
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : str ,__UpperCamelCase : int=None ):
"""simple docstring"""
A_ = torch.load(__UpperCamelCase )
A_ = WavLMConfigOrig(checkpoint["cfg"] )
A_ = WavLMOrig(__UpperCamelCase )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
A_ = WavLMConfig.from_pretrained(__UpperCamelCase )
else:
A_ = WavLMConfig()
A_ = WavLMModel(__UpperCamelCase )
recursively_load_weights(__UpperCamelCase ,__UpperCamelCase )
hf_wavlm.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__a :Optional[int] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 86
| 0
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _snake_case ( unittest.TestCase ):
_lowercase : Union[str, Any] = MODEL_FOR_CAUSAL_LM_MAPPING
_lowercase : Optional[Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt')
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE = text_generator('This is a test' , do_sample=a)
self.assertEqual(
a , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
SCREAMING_SNAKE_CASE = text_generator(['This is a test', 'This is a second test'])
self.assertEqual(
a , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
SCREAMING_SNAKE_CASE = text_generator('This is a test' , do_sample=a , num_return_sequences=2 , return_tensors=a)
self.assertEqual(
a , [
{'generated_token_ids': ANY(a)},
{'generated_token_ids': ANY(a)},
] , )
SCREAMING_SNAKE_CASE = text_generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE = '<pad>'
SCREAMING_SNAKE_CASE = text_generator(
['This is a test', 'This is a second test'] , do_sample=a , num_return_sequences=2 , batch_size=2 , return_tensors=a , )
self.assertEqual(
a , [
[
{'generated_token_ids': ANY(a)},
{'generated_token_ids': ANY(a)},
],
[
{'generated_token_ids': ANY(a)},
{'generated_token_ids': ANY(a)},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf')
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE = text_generator('This is a test' , do_sample=a)
self.assertEqual(
a , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
SCREAMING_SNAKE_CASE = text_generator(['This is a test', 'This is a second test'] , do_sample=a)
self.assertEqual(
a , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> Optional[int]:
SCREAMING_SNAKE_CASE = TextGenerationPipeline(model=a , tokenizer=a)
return text_generator, ["This is a test", "Another test"]
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = 'Hello I believe in'
SCREAMING_SNAKE_CASE = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2')
SCREAMING_SNAKE_CASE = text_generator(a)
self.assertEqual(
a , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
SCREAMING_SNAKE_CASE = text_generator(a , stop_sequence=' fe')
self.assertEqual(a , [{'generated_text': 'Hello I believe in fe'}])
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> str:
SCREAMING_SNAKE_CASE = text_generator.model
SCREAMING_SNAKE_CASE = text_generator.tokenizer
SCREAMING_SNAKE_CASE = text_generator('This is a test')
self.assertEqual(a , [{'generated_text': ANY(a)}])
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test'))
SCREAMING_SNAKE_CASE = text_generator('This is a test' , return_full_text=a)
self.assertEqual(a , [{'generated_text': ANY(a)}])
self.assertNotIn('This is a test' , outputs[0]['generated_text'])
SCREAMING_SNAKE_CASE = pipeline(task='text-generation' , model=a , tokenizer=a , return_full_text=a)
SCREAMING_SNAKE_CASE = text_generator('This is a test')
self.assertEqual(a , [{'generated_text': ANY(a)}])
self.assertNotIn('This is a test' , outputs[0]['generated_text'])
SCREAMING_SNAKE_CASE = text_generator('This is a test' , return_full_text=a)
self.assertEqual(a , [{'generated_text': ANY(a)}])
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test'))
SCREAMING_SNAKE_CASE = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=a)
self.assertEqual(
a , [
[{'generated_text': ANY(a)}, {'generated_text': ANY(a)}],
[{'generated_text': ANY(a)}, {'generated_text': ANY(a)}],
] , )
if text_generator.tokenizer.pad_token is not None:
SCREAMING_SNAKE_CASE = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=a)
self.assertEqual(
a , [
[{'generated_text': ANY(a)}, {'generated_text': ANY(a)}],
[{'generated_text': ANY(a)}, {'generated_text': ANY(a)}],
] , )
with self.assertRaises(a):
SCREAMING_SNAKE_CASE = text_generator('test' , return_full_text=a , return_text=a)
with self.assertRaises(a):
SCREAMING_SNAKE_CASE = text_generator('test' , return_full_text=a , return_tensors=a)
with self.assertRaises(a):
SCREAMING_SNAKE_CASE = text_generator('test' , return_text=a , return_tensors=a)
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
SCREAMING_SNAKE_CASE = text_generator('')
self.assertEqual(a , [{'generated_text': ANY(a)}])
else:
with self.assertRaises((ValueError, AssertionError)):
SCREAMING_SNAKE_CASE = text_generator('')
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
SCREAMING_SNAKE_CASE = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
text_generator('This is a test' * 500 , max_new_tokens=20)
SCREAMING_SNAKE_CASE = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20)
            # The hole strategy cannot work here: max_new_tokens alone already exceeds the model's
            # maximum length, so no amount of prompt truncation can make the request fit
with self.assertRaises(a):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self) -> int:
import torch
# Classic `model_kwargs`
SCREAMING_SNAKE_CASE = pipeline(
model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa)
SCREAMING_SNAKE_CASE = pipe('This is a test')
self.assertEqual(
a , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
SCREAMING_SNAKE_CASE = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa)
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa)
SCREAMING_SNAKE_CASE = pipe('This is a test')
self.assertEqual(
a , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
SCREAMING_SNAKE_CASE = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto')
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa)
SCREAMING_SNAKE_CASE = pipe('This is a test')
self.assertEqual(
a , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
import torch
SCREAMING_SNAKE_CASE = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa)
pipe('This is a test')
@require_torch
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
import torch
SCREAMING_SNAKE_CASE = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa)
pipe('This is a test' , do_sample=a , top_p=0.5)
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = 'Hello world'
SCREAMING_SNAKE_CASE = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2')
if text_generator.model.framework == "tf":
SCREAMING_SNAKE_CASE = logging.get_logger('transformers.generation.tf_utils')
else:
SCREAMING_SNAKE_CASE = logging.get_logger('transformers.generation.utils')
        SCREAMING_SNAKE_CASE = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(a) as cl:
SCREAMING_SNAKE_CASE = text_generator(a , max_length=10 , max_new_tokens=1)
self.assertIn(a , cl.out)
# The user only sets one -> no warning
with CaptureLogger(a) as cl:
SCREAMING_SNAKE_CASE = text_generator(a , max_new_tokens=1)
self.assertNotIn(a , cl.out)
with CaptureLogger(a) as cl:
SCREAMING_SNAKE_CASE = text_generator(a , max_length=10)
self.assertNotIn(a , cl.out)
| 705
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
a_ : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class _snake_case :
_lowercase : Optional[int] = field(
default=1_28 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_lowercase : bool = field(
default=A__ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
_lowercase : bool = field(
default=A__ , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
_lowercase : Optional[int] = field(
default=A__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
_lowercase : Optional[int] = field(
default=A__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
_lowercase : Optional[int] = field(
default=A__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
@dataclass
class _snake_case :
_lowercase : str = field(
default=A__ , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_lowercase : str = field(
default=A__ , metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_lowercase : Optional[bool] = field(
default=A__ , metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''} , )
_lowercase : bool = field(
default=A__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
_lowercase : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
_lowercase : bool = field(
default=A__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
_lowercase : bool = field(
default=A__ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def lowerCamelCase__ ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_xnli' , _UpperCAmelCase)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout)] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(_UpperCAmelCase)
datasets.utils.logging.set_verbosity(_UpperCAmelCase)
transformers.utils.logging.set_verbosity(_UpperCAmelCase)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}''')
logger.info(F'''Training/evaluation parameters {training_args}''')
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.')
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
# Set seed before initializing model.
set_seed(training_args.seed)
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
SCREAMING_SNAKE_CASE = load_dataset(
'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
SCREAMING_SNAKE_CASE = load_dataset(
'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE = train_dataset.features['label'].names
if training_args.do_eval:
SCREAMING_SNAKE_CASE = load_dataset(
'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE = eval_dataset.features['label'].names
if training_args.do_predict:
SCREAMING_SNAKE_CASE = load_dataset(
'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE = predict_dataset.features['label'].names
# Labels
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
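# Illustrative invocation (added for context, not part of the original script; the checkpoint,
# language pair, and output directory below are assumptions — the authoritative flags are the
# argument dataclasses defined earlier in this file):
#
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de \
#     --train_language en \
#     --do_train \
#     --do_eval \
#     --per_device_train_batch_size 32 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 2.0 \
#     --max_seq_length 128 \
#     --output_dir /tmp/debug_xnli/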
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
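# Usage sketch (added for illustration, not part of the original module; assumes the default
# hyperparameters above): building a config and reading the derived properties.
if __name__ == "__main__":
    config = FalconConfig()
    print(config.head_dim)  # 4544 hidden units split across 71 heads -> 64 dims per head
    print(config.rotary)  # True, since `alibi` defaults to False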
def validate_initial_digits(credit_card_number: str) -> bool:
    # Valid cards start with 34, 35, 37, 4, 5 or 6 (Amex, JCB, Visa, MasterCard, Discover).
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
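# Worked example (added for clarity, not in the original file): for the number "59", the Luhn
# pass doubles the 5 to 10, reduces it to 1 + 0 = 1, then adds the trailing 9; the total of 10
# is divisible by 10, so luhn_validation("59") returns True.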
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
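# Note (added for clarity, not part of the original file): at import time only the lightweight
# `_import_structure` mapping above is built; `_LazyModule` defers the heavy torch/TF/Flax
# imports until a name such as `RoFormerModel` is first accessed, e.g. via
# `from transformers.models.roformer import RoFormerModel`.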
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
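# Usage sketch (added for illustration, not part of the original module; the checkpoint name is
# an assumption and the audio is a silent dummy waveform): pairing the feature extractor and
# CTC tokenizer through the processor.
if __name__ == "__main__":
    import numpy as np

    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    dummy_audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
    inputs = processor(audio=dummy_audio, sampling_rate=16000, return_tensors="pt")
    print(inputs.input_values.shape)  # (1, 16000)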